From 75a7d89b92630bf666456061772f014749dcc109 Mon Sep 17 00:00:00 2001 From: sugarme Date: Sat, 31 Oct 2020 19:25:32 +1100 Subject: [PATCH] converted to pointer receiver at tensor APIs, tensor and nn sub-packages --- README.md | 1 + gen/gen.ml | 23 +- libtch/c-generated.go | 10367 ++++++----- nn/batch-norm.go | 26 +- nn/conv-transpose.go | 92 +- nn/conv.go | 147 +- nn/func.go | 14 +- nn/init.go | 26 +- nn/layer-norm.go | 20 +- nn/linear.go | 18 +- nn/optimizer.go | 52 +- nn/rnn.go | 56 +- nn/rnn_test.go | 8 +- nn/sequential.go | 50 +- nn/sparse.go | 16 +- nn/sparse_test.go | 2 +- nn/varstore.go | 90 +- nn/varstore_test.go | 2 +- tensor/data.go | 68 +- tensor/image.go | 21 +- tensor/index.go | 22 +- tensor/iter.go | 20 +- tensor/jit.go | 10 +- tensor/jit_test.go | 8 +- tensor/module.go | 12 +- tensor/must-tensor-generated.go | 18078 ++++++++++--------- tensor/optimizer.go | 45 +- tensor/other.go | 6 +- tensor/patch.go | 48 +- tensor/scalar.go | 18 +- tensor/tensor-generated.go | 27836 ++++++++++++++++-------------- tensor/tensor.go | 463 +- 32 files changed, 30763 insertions(+), 26902 deletions(-) diff --git a/README.md b/README.md index 42bb9ae..d05ea1d 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ - **GoTch** is a C++ Libtorch Go binding for developing and implementing deep learning projects in Go. - This package is to create a thin wrapper of Libtorch to make use of its tensor APIs and CUDA support while implementing as much idiomatic Go as possible. +- There are about **1129** auto-generated tensor APIs. ## Dependencies diff --git a/gen/gen.ml b/gen/gen.ml index 6f8f1f2..d48d373 100644 --- a/gen/gen.ml +++ b/gen/gen.ml @@ -1,7 +1,6 @@ (* Automatically generate the C++ -> C -> Go bindings. This takes as input the Descriptions.yaml file that gets generated when func (Func.c_go_args_list func) building PyTorch from source. - Run with: dune exec gen/gen.exe *) open Base @@ -347,15 +346,15 @@ module Func = struct | Bool -> "bool" | Int64 -> "int64" | Double -> "float64" - | Tensor -> "Tensor" - | TensorOption -> "Tensor" + | Tensor -> "*Tensor" + | TensorOption -> "*Tensor" | IntList -> "[]int64" | TensorList -> "[]Tensor" | String -> "string" (* TODO. Struct{Kind gotch.DType Device gotch.Device} *) (* E.g. `type KindDevice struct{}` *) | TensorOptions -> "gotch.KindDevice" - | Scalar -> "Scalar" + | Scalar -> "*Scalar" | ScalarType -> "gotch.DType" | Device -> "gotch.Device" in @@ -396,9 +395,9 @@ module Func = struct (* printf "t name: %s\n" t.name ; *) let returns = match t.returns with - | `fixed 1 -> "retVal Tensor" + | `fixed 1 -> "retVal *Tensor" | `fixed v -> - List.init v ~f:(fun i -> Printf.sprintf "retVal%d Tensor" i) + List.init v ~f:(fun i -> Printf.sprintf "retVal%d *Tensor" i) |> String.concat ~sep:", " |> Printf.sprintf "%s" | `dynamic -> "retVal []Tensor" in @@ -698,7 +697,7 @@ let write_wrapper funcs filename = match func.returns with | `dynamic -> pm "\n" ; - if is_method then pm "func(ts Tensor) %s(" gofunc_name + if is_method then pm "func(ts *Tensor) %s(" gofunc_name else pm "func %s(" gofunc_name ; pm "%s" go_args_list ; pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; @@ -714,13 +713,13 @@ let write_wrapper funcs filename = pm " }\n" ; (* NOTE. 
if in_place method, no retVal return *) if not (Func.is_inplace func) then - pm " retVal = Tensor{ctensor: *ptr}\n" ; + pm " retVal = &Tensor{ctensor: *ptr}\n" ; pm " \n" ; pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; pm "} \n" | `fixed 1 -> pm "\n" ; - if is_method then pm "func(ts Tensor) %s(" gofunc_name + if is_method then pm "func(ts *Tensor) %s(" gofunc_name else pm "func %s(" gofunc_name ; pm "%s" go_args_list ; pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; @@ -736,7 +735,7 @@ let write_wrapper funcs filename = pm " }\n" ; (* NOTE. if in_place method, no retVal return *) if not (Func.is_inplace func) then - pm " retVal = Tensor{ctensor: *ptr}\n" ; + pm " retVal = &Tensor{ctensor: *ptr}\n" ; pm " \n" ; pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; pm "} \n" @@ -804,7 +803,7 @@ let write_must_wrapper funcs filename = match func.returns with | `dynamic -> pm "\n" ; - if is_method then pm "func(ts Tensor) %s(" gofunc_name + if is_method then pm "func(ts *Tensor) %s(" gofunc_name else pm "func Must%s(" gofunc_name ; pm "%s" go_args_list ; pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; @@ -821,7 +820,7 @@ let write_must_wrapper funcs filename = pm "} \n" | `fixed 1 -> pm "\n" ; - if is_method then pm "func(ts Tensor) Must%s(" gofunc_name + if is_method then pm "func(ts *Tensor) Must%s(" gofunc_name else pm "func Must%s(" gofunc_name ; pm "%s" go_args_list ; pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; diff --git a/libtch/c-generated.go b/libtch/c-generated.go index acd6ab3..e7361bc 100644 --- a/libtch/c-generated.go +++ b/libtch/c-generated.go @@ -2,5486 +2,5485 @@ package libtch // NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! -//#include "stdbool.h" -//#include "torch_api.h" +//#include "stdbool.h" +//#include "torch_api.h" import "C" import "unsafe" -func Atg__And_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___and__(ptr, self, other ) +func Atg__And_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___and__(ptr, self, other) } -func Atg__And1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___and__1(ptr, self, other) +func Atg__And1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___and__1(ptr, self, other) } -func Atg__Iand_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___iand__(ptr, self, other ) +func Atg__Iand_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___iand__(ptr, self, other) } -func Atg__Iand1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___iand__1(ptr, self, other) +func Atg__Iand1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___iand__1(ptr, self, other) } -func Atg__Ilshift_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___ilshift__(ptr, self, other ) +func Atg__Ilshift_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___ilshift__(ptr, self, other) } -func Atg__Ilshift1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___ilshift__1(ptr, self, other) +func Atg__Ilshift1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___ilshift__1(ptr, self, other) } -func Atg__Ior_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___ior__(ptr, self, other ) +func Atg__Ior_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___ior__(ptr, self, other) } -func Atg__Ior1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___ior__1(ptr, self, other) +func Atg__Ior1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___ior__1(ptr, self, other) } -func Atg__Irshift_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___irshift__(ptr, self, 
other ) +func Atg__Irshift_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___irshift__(ptr, self, other) } -func Atg__Irshift1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___irshift__1(ptr, self, other) +func Atg__Irshift1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___irshift__1(ptr, self, other) } -func Atg__Ixor_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___ixor__(ptr, self, other ) +func Atg__Ixor_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___ixor__(ptr, self, other) } -func Atg__Ixor1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___ixor__1(ptr, self, other) +func Atg__Ixor1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___ixor__1(ptr, self, other) } -func Atg__Lshift_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___lshift__(ptr, self, other ) +func Atg__Lshift_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___lshift__(ptr, self, other) } -func Atg__Lshift1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___lshift__1(ptr, self, other) +func Atg__Lshift1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___lshift__1(ptr, self, other) } -func Atg__Or_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___or__(ptr, self, other ) +func Atg__Or_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___or__(ptr, self, other) } -func Atg__Or1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___or__1(ptr, self, other) +func Atg__Or1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___or__1(ptr, self, other) } -func Atg__Rshift_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___rshift__(ptr, self, other ) +func Atg__Rshift_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___rshift__(ptr, self, other) } -func Atg__Rshift1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___rshift__1(ptr, self, other) +func Atg__Rshift1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___rshift__1(ptr, self, other) } -func Atg__Xor_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg___xor__(ptr, self, other ) +func Atg__Xor_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg___xor__(ptr, self, other) } -func Atg__Xor1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg___xor__1(ptr, self, other) +func Atg__Xor1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg___xor__1(ptr, self, other) } -func Atg_AdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg__adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +func Atg_AdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg__adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func Atg_AdaptiveAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ -C.atg__adaptive_avg_pool2d_backward(ptr, gradOutput, self) +func Atg_AdaptiveAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor) { + C.atg__adaptive_avg_pool2d_backward(ptr, gradOutput, self) } -func Atg_Addr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ -C.atg__addr(ptr, self, vec1, vec2) +func Atg_Addr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { + C.atg__addr(ptr, self, vec1, vec2) } -func Atg_Addr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ -C.atg__addr_(ptr, self, vec1, vec2) +func Atg_Addr_(ptr 
*Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { + C.atg__addr_(ptr, self, vec1, vec2) } -func Atg_AddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ -C.atg__addr_out(ptr, out, self, vec1, vec2) -} -func Atg_AmpUpdateScale(ptr *Ctensor, growthTracker Ctensor, currentScale Ctensor, foundInf Ctensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64){ -cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor)) -cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor)) -cgrowthInterval := *(*C.int64_t)(unsafe.Pointer(&growthInterval)) -C.atg__amp_update_scale(ptr, growthTracker, currentScale, foundInf, cscaleGrowthFactor, cscaleBackoffFactor, cgrowthInterval) -} -func Atg_BaddbmmMkl_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ -C.atg__baddbmm_mkl_(ptr, self, batch1, batch2) -} -func Atg_CastByte(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_byte(ptr, self, cnonBlocking) -} -func Atg_CastChar(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_char(ptr, self, cnonBlocking) -} -func Atg_CastDouble(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_double(ptr, self, cnonBlocking) -} -func Atg_CastFloat(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_float(ptr, self, cnonBlocking) -} -func Atg_CastHalf(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_half(ptr, self, cnonBlocking) -} -func Atg_CastInt(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_int(ptr, self, cnonBlocking) -} -func Atg_CastLong(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_long(ptr, self, cnonBlocking) -} -func Atg_CastShort(ptr *Ctensor, self Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__cast_short(ptr, self, cnonBlocking) -} -func Atg_Cat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) -} -func Atg_CatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) -} -func Atg_CdistBackward(ptr *Ctensor, grad Ctensor, x1 Ctensor, x2 Ctensor, p float64, cdist Ctensor){ -cp := *(*C.double)(unsafe.Pointer(&p)) -C.atg__cdist_backward(ptr, grad, x1, x2, cp, cdist) -} -func Atg_CholeskyHelper(ptr *Ctensor, self Ctensor, upper int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg__cholesky_helper(ptr, self, cupper) -} -func Atg_CholeskySolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg__cholesky_solve_helper(ptr, self, a, cupper) -} -func Atg_Coalesced_(ptr *Ctensor, self Ctensor, coalesced int32){ -ccoalesced := *(*C.int)(unsafe.Pointer(&coalesced)) 
-C.atg__coalesced_(ptr, self, ccoalesced) -} -func Atg_Convolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) -C.atg__convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cbenchmark, cdeterministic, ccudnnEnabled) -} -func Atg_ConvolutionNogroup(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -C.atg__convolution_nogroup(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen) -} -func Atg_CopyFrom(ptr *Ctensor, self Ctensor, dst Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg__copy_from(ptr, self, dst, cnonBlocking) -} -func Atg_CtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, zeroInfinity int32){ -cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) -cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) -ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) -ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) -cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) -czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) -C.atg__ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, czeroInfinity) -} -func Atg_CtcLossBackward(ptr *Ctensor, grad Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, 
negLogLikelihood Ctensor, logAlpha Ctensor, blank int64, zeroInfinity int32){ -cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) -cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) -ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) -ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) -cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) -czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) -C.atg__ctc_loss_backward(ptr, grad, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, negLogLikelihood, logAlpha, cblank, czeroInfinity) -} -func Atg_CudnnCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, deterministic int32, zeroInfinity int32){ -cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) -cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) -ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) -ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) -cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) -C.atg__cudnn_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, cdeterministic, czeroInfinity) -} -func Atg_CudnnInitDropoutState(ptr *Ctensor, dropout float64, train int32, dropoutSeed int64, optionsKind int32, optionsDevice int32){ -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cdropoutSeed := *(*C.int64_t)(unsafe.Pointer(&dropoutSeed)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg__cudnn_init_dropout_state(ptr, cdropout, ctrain, cdropoutSeed, coptionsKind, coptionsDevice) -} -func Atg_CudnnRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, weightBuf Ctensor, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){ -cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) -cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) -cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) -cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) -C.atg__cudnn_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, weightBuf, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) -} -func Atg_CudnnRnnFlattenWeight(ptr *Ctensor, weightArrData []Ctensor, weightArrLen int, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, bidirectional int32){ -cweightArrDataPtr := (*Ctensor)(unsafe.Pointer(&weightArrData[0])) 
-cweightArrLen := *(*C.int)(unsafe.Pointer(&weightArrLen)) -cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) -cinputSize := *(*C.int64_t)(unsafe.Pointer(&inputSize)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -C.atg__cudnn_rnn_flatten_weight(ptr, cweightArrDataPtr, cweightArrLen, cweightStride0, cinputSize, cmode, chiddenSize, cnumLayers, cbatchFirst, cbidirectional) -} -func Atg_Cumprod(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__cumprod(ptr, self, cdim) -} -func Atg_CumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__cumprod_out(ptr, out, self, cdim) -} -func Atg_Cumsum(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__cumsum(ptr, self, cdim) -} -func Atg_CumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__cumsum_out(ptr, out, self, cdim) -} -func Atg_DimArange(ptr *Ctensor, like Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__dim_arange(ptr, like, cdim) -} -func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor){ -C.atg__dirichlet_grad(ptr, x, alpha, total) -} -func Atg_EmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){ -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -csparse := *(*C.int)(unsafe.Pointer(&sparse)) -cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) -C.atg__embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) -} -func Atg_EmbeddingBagBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor){ -cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -csparse := *(*C.int)(unsafe.Pointer(&sparse)) -C.atg__embedding_bag_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, csparse, perSampleWeights) -} -func Atg_EmbeddingBagDenseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){ -cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -C.atg__embedding_bag_dense_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) -} -func Atg_EmbeddingBagPerSampleWeightsBackward(ptr *Ctensor, grad Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, mode int64){ -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -C.atg__embedding_bag_per_sample_weights_backward(ptr, grad, weight, indices, offsets, offset2bag, 
cmode) -} -func Atg_EmbeddingBagSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){ -cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -C.atg__embedding_bag_sparse_backward(ptr, grad, indices, offsets, offset2bag, bagSize, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) -} -func Atg_EmptyAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32, scale float64, zeroPoint int64){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -cscale := *(*C.double)(unsafe.Pointer(&scale)) -czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) -C.atg__empty_affine_quantized(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice, cscale, czeroPoint) -} -func Atg_EmptyPerChannelAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, scales Ctensor, zeroPoints Ctensor, axis int64, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg__empty_per_channel_affine_quantized(ptr, csizeDataPtr, csizeLen, scales, zeroPoints, caxis, coptionsKind, coptionsDevice) -} -func Atg_FftWithSize(ptr *Ctensor, self Ctensor, signalNdim int64, complexInput int32, complexOutput int32, inverse int32, checkedSignalSizesData []int64, checkedSignalSizesLen int, normalized int32, onesided int32, outputSizesData []int64, outputSizesLen int){ -csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) -ccomplexInput := *(*C.int)(unsafe.Pointer(&complexInput)) -ccomplexOutput := *(*C.int)(unsafe.Pointer(&complexOutput)) -cinverse := *(*C.int)(unsafe.Pointer(&inverse)) -ccheckedSignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&checkedSignalSizesData[0])) -ccheckedSignalSizesLen := *(*C.int)(unsafe.Pointer(&checkedSignalSizesLen)) -cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) -conesided := *(*C.int)(unsafe.Pointer(&onesided)) -coutputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizesData[0])) -coutputSizesLen := *(*C.int)(unsafe.Pointer(&outputSizesLen)) -C.atg__fft_with_size(ptr, self, csignalNdim, ccomplexInput, ccomplexOutput, cinverse, ccheckedSignalSizesDataPtr, ccheckedSignalSizesLen, cnormalized, conesided, coutputSizesDataPtr, coutputSizesLen) -} -func Atg_FusedDropout(ptr *Ctensor, self Ctensor, p float64){ -cp := *(*C.double)(unsafe.Pointer(&p)) -C.atg__fused_dropout(ptr, self, cp) -} -func Atg_GatherSparseBackward(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, grad Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__gather_sparse_backward(ptr, self, cdim, index, grad) -} -func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__index_copy_(ptr, self, cdim, index, source) -} -func Atg_IndexPutImpl_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32, unsafety int32){ -cindicesDataPtr := 
(*Ctensor)(unsafe.Pointer(&indicesData[0])) -cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) -caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) -cunsafety := *(*C.int)(unsafe.Pointer(&unsafety)) -C.atg__index_put_impl_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate, cunsafety) -} -func Atg_Indices(ptr *Ctensor, self Ctensor){ -C.atg__indices(ptr, self) -} -func Atg_InverseHelper(ptr *Ctensor, self Ctensor){ -C.atg__inverse_helper(ptr, self) -} -func Atg_LogSoftmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) -C.atg__log_softmax(ptr, self, cdim, chalfToFloat) -} -func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__log_softmax_backward_data(ptr, gradOutput, output, cdim, self) -} -func Atg_LuSolveHelper(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ -C.atg__lu_solve_helper(ptr, self, lUData, lUPivots) -} -func Atg_LuWithInfo(ptr *Ctensor, self Ctensor, pivot int32, checkErrors int32){ -cpivot := *(*C.int)(unsafe.Pointer(&pivot)) -ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors)) -C.atg__lu_with_info(ptr, self, cpivot, ccheckErrors) -} -func Atg_MakePerChannelQuantizedTensor(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64){ -caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) -C.atg__make_per_channel_quantized_tensor(ptr, self, scale, zeroPoint, caxis) -} -func Atg_MakePerTensorQuantizedTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64){ -cscale := *(*C.double)(unsafe.Pointer(&scale)) -czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) -C.atg__make_per_tensor_quantized_tensor(ptr, self, cscale, czeroPoint) -} -func Atg_MaskedScale(ptr *Ctensor, self Ctensor, mask Ctensor, scale float64){ -cscale := *(*C.double)(unsafe.Pointer(&scale)) -C.atg__masked_scale(ptr, self, mask, cscale) -} -func Atg_Max(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg__max(ptr, self, cdim, ckeepdim) -} -func Atg_MaxOut(ptr *Ctensor, max Ctensor, maxIndices Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg__max_out(ptr, max, maxIndices, self, cdim, ckeepdim) -} -func Atg_Min(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg__min(ptr, self, cdim, ckeepdim) -} -func Atg_MinOut(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg__min_out(ptr, min, minIndices, self, cdim, ckeepdim) -} -func Atg_MkldnnReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){ -cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) -cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) -C.atg__mkldnn_reshape(ptr, self, cshapeDataPtr, cshapeLen) -} -func Atg_MkldnnTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){ -cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) -cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) -C.atg__mkldnn_transpose(ptr, self, cdim0, cdim1) -} -func Atg_MkldnnTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){ -cdim0 := 
*(*C.int64_t)(unsafe.Pointer(&dim0)) -cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) -C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1) -} -func Atg_Mode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg__mode(ptr, self, cdim, ckeepdim) -} -func Atg_ModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg__mode_out(ptr, values, indices, self, cdim, ckeepdim) -} -func Atg_MultinomialAliasDraw(ptr *Ctensor, j Ctensor, q Ctensor, numSamples int64){ -cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) -C.atg__multinomial_alias_draw(ptr, j, q, cnumSamples) -} -func Atg_MultinomialAliasSetup(ptr *Ctensor, probs Ctensor){ -C.atg__multinomial_alias_setup(ptr, probs) -} -func Atg_NnpackSpatialConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg__nnpack_spatial_convolution(ptr, input, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func Atg_NnpackSpatialConvolutionBackwardInput(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg__nnpack_spatial_convolution_backward_input(ptr, input, gradOutput, weight, cpaddingDataPtr, cpaddingLen) -} -func Atg_NnpackSpatialConvolutionBackwardWeight(ptr *Ctensor, input Ctensor, weightsizeData []int64, weightsizeLen int, gradOutput Ctensor, paddingData []int64, paddingLen int){ -cweightsizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightsizeData[0])) -cweightsizeLen := *(*C.int)(unsafe.Pointer(&weightsizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen) -} -func Atg_PackPaddedSequence(ptr *Ctensor, input Ctensor, lengths Ctensor, batchFirst int32){ -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -C.atg__pack_padded_sequence(ptr, input, lengths, cbatchFirst) -} -func Atg_PackPaddedSequenceBackward(ptr *Ctensor, grad Ctensor, inputSizeData []int64, inputSizeLen int, batchSizes Ctensor, batchFirst int32){ -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -C.atg__pack_padded_sequence_backward(ptr, grad, cinputSizeDataPtr, cinputSizeLen, batchSizes, cbatchFirst) -} -func Atg_PadPackedSequence(ptr *Ctensor, data Ctensor, batchSizes Ctensor, batchFirst int32, paddingValue Cscalar, totalLength int64){ -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -ctotalLength := *(*C.int64_t)(unsafe.Pointer(&totalLength)) -C.atg__pad_packed_sequence(ptr, data, batchSizes, cbatchFirst, paddingValue , ctotalLength) -} -func Atg_PdistBackward(ptr *Ctensor, grad Ctensor, self Ctensor, p float64, pdist Ctensor){ -cp := *(*C.double)(unsafe.Pointer(&p)) 
-C.atg__pdist_backward(ptr, grad, self, cp, pdist) -} -func Atg_QrHelper(ptr *Ctensor, self Ctensor, some int32){ -csome := *(*C.int)(unsafe.Pointer(&some)) -C.atg__qr_helper(ptr, self, csome) -} -func Atg_ReshapeFromTensor(ptr *Ctensor, self Ctensor, shape Ctensor){ -C.atg__reshape_from_tensor(ptr, self, shape) -} -func Atg_SWhere(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){ -C.atg__s_where(ptr, condition, self, other) -} -func Atg_SampleDirichlet(ptr *Ctensor, self Ctensor){ -C.atg__sample_dirichlet(ptr, self) -} -func Atg_ShapeAsTensor(ptr *Ctensor, self Ctensor){ -C.atg__shape_as_tensor(ptr, self) -} -func Atg_SobolEngineDraw(ptr *Ctensor, quasi Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64, dtype int32){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) -cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg__sobol_engine_draw(ptr, quasi, cn, sobolstate, cdimension, cnumGenerated, cdtype) -} -func Atg_SobolEngineFf_(ptr *Ctensor, self Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) -cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) -C.atg__sobol_engine_ff_(ptr, self, cn, sobolstate, cdimension, cnumGenerated) -} -func Atg_SobolEngineInitializeState_(ptr *Ctensor, self Ctensor, dimension int64){ -cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) -C.atg__sobol_engine_initialize_state_(ptr, self, cdimension) -} -func Atg_SobolEngineScramble_(ptr *Ctensor, self Ctensor, ltm Ctensor, dimension int64){ -cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) -C.atg__sobol_engine_scramble_(ptr, self, ltm, cdimension) -} -func Atg_Softmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) -C.atg__softmax(ptr, self, cdim, chalfToFloat) -} -func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, self) -} -func Atg_SolveHelper(ptr *Ctensor, self Ctensor, a Ctensor){ -C.atg__solve_helper(ptr, self, a) -} -func Atg_SparseAddmm(ptr *Ctensor, self Ctensor, sparse Ctensor, dense Ctensor){ -C.atg__sparse_addmm(ptr, self, sparse, dense) -} -func Atg_SparseCooTensorUnsafe(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg__sparse_coo_tensor_unsafe(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func Atg_SparseCooTensorWithDims(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) -cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg__sparse_coo_tensor_with_dims(ptr, 
csparseDim, cdenseDim, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func Atg_SparseCooTensorWithDimsAndTensors(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){ -csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) -cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg__sparse_coo_tensor_with_dims_and_tensors(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, indices, values, coptionsKind, coptionsDevice) -} -func Atg_SparseMm(ptr *Ctensor, sparse Ctensor, dense Ctensor){ -C.atg__sparse_mm(ptr, sparse, dense) -} -func Atg_SparseSum(ptr *Ctensor, self Ctensor){ -C.atg__sparse_sum(ptr, self) -} -func Atg_SparseSum1(ptr *Ctensor, self Ctensor, dtype int32){ -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg__sparse_sum1(ptr, self, cdtype) -} -func Atg_SparseSum2(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -C.atg__sparse_sum2(ptr, self, cdimDataPtr, cdimLen) -} -func Atg_SparseSum3(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, dtype int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg__sparse_sum3(ptr, self, cdimDataPtr, cdimLen, cdtype) -} -func Atg_SparseSumBackward(ptr *Ctensor, grad Ctensor, self Ctensor, dimData []int64, dimLen int){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -C.atg__sparse_sum_backward(ptr, grad, self, cdimDataPtr, cdimLen) -} -func Atg_StandardGamma(ptr *Ctensor, self Ctensor){ -C.atg__standard_gamma(ptr, self) -} -func Atg_StandardGammaGrad(ptr *Ctensor, self Ctensor, output Ctensor){ -C.atg__standard_gamma_grad(ptr, self, output) -} -func Atg_Std(ptr *Ctensor, self Ctensor, unbiased int32){ -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -C.atg__std(ptr, self, cunbiased) -} -func Atg_SvdHelper(ptr *Ctensor, self Ctensor, some int32, computeUv int32){ -csome := *(*C.int)(unsafe.Pointer(&some)) -ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) -C.atg__svd_helper(ptr, self, csome, ccomputeUv) -} -func Atg_SymeigHelper(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){ -ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg__symeig_helper(ptr, self, ceigenvectors, cupper) -} -func Atg_TriangularSolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) -cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) -C.atg__triangular_solve_helper(ptr, self, a, cupper, ctranspose, cunitriangular) -} -func Atg_Trilinear(ptr *Ctensor, i1 Ctensor, i2 Ctensor, i3 Ctensor, expand1Data []int64, expand1Len int, expand2Data []int64, expand2Len int, expand3Data []int64, expand3Len int, sumdimData []int64, sumdimLen int, unrollDim int64){ -cexpand1DataPtr := (*C.int64_t)(unsafe.Pointer(&expand1Data[0])) -cexpand1Len := *(*C.int)(unsafe.Pointer(&expand1Len)) -cexpand2DataPtr := (*C.int64_t)(unsafe.Pointer(&expand2Data[0])) 
-cexpand2Len := *(*C.int)(unsafe.Pointer(&expand2Len)) -cexpand3DataPtr := (*C.int64_t)(unsafe.Pointer(&expand3Data[0])) -cexpand3Len := *(*C.int)(unsafe.Pointer(&expand3Len)) -csumdimDataPtr := (*C.int64_t)(unsafe.Pointer(&sumdimData[0])) -csumdimLen := *(*C.int)(unsafe.Pointer(&sumdimLen)) -cunrollDim := *(*C.int64_t)(unsafe.Pointer(&unrollDim)) -C.atg__trilinear(ptr, i1, i2, i3, cexpand1DataPtr, cexpand1Len, cexpand2DataPtr, cexpand2Len, cexpand3DataPtr, cexpand3Len, csumdimDataPtr, csumdimLen, cunrollDim) -} -func Atg_Unique(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32){ -csorted := *(*C.int)(unsafe.Pointer(&sorted)) -creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) -C.atg__unique(ptr, self, csorted, creturnInverse) -} -func Atg_Unique2(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32, returnCounts int32){ -csorted := *(*C.int)(unsafe.Pointer(&sorted)) -creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) -creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) -C.atg__unique2(ptr, self, csorted, creturnInverse, creturnCounts) -} -func Atg_UnsafeView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen) -} -func Atg_Values(ptr *Ctensor, self Ctensor){ -C.atg__values(ptr, self) -} -func Atg_Var(ptr *Ctensor, self Ctensor, unbiased int32){ -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -C.atg__var(ptr, self, cunbiased) -} -func Atg_WeightNorm(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__weight_norm(ptr, v, g, cdim) -} -func Atg_WeightNormCudaInterface(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__weight_norm_cuda_interface(ptr, v, g, cdim) -} -func Atg_WeightNormCudaInterfaceBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__weight_norm_cuda_interface_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) -} -func Atg_WeightNormDifferentiableBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg__weight_norm_differentiable_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) -} -func AtgAbs(ptr *Ctensor, self Ctensor){ -C.atg_abs(ptr, self) -} -func AtgAbs_(ptr *Ctensor, self Ctensor){ -C.atg_abs_(ptr, self) -} -func AtgAbsOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_abs_out(ptr, out, self) -} -func AtgAcos(ptr *Ctensor, self Ctensor){ -C.atg_acos(ptr, self) -} -func AtgAcos_(ptr *Ctensor, self Ctensor){ -C.atg_acos_(ptr, self) -} -func AtgAcosOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_acos_out(ptr, out, self) +func Atg_AddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { + C.atg__addr_out(ptr, out, self, vec1, vec2) +} +func Atg_AmpUpdateScale(ptr *Ctensor, growthTracker Ctensor, currentScale Ctensor, foundInf Ctensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) { + cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor)) + cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor)) + cgrowthInterval := *(*C.int64_t)(unsafe.Pointer(&growthInterval)) + C.atg__amp_update_scale(ptr, growthTracker, currentScale, foundInf, cscaleGrowthFactor, 
cscaleBackoffFactor, cgrowthInterval) +} +func Atg_BaddbmmMkl_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { + C.atg__baddbmm_mkl_(ptr, self, batch1, batch2) +} +func Atg_CastByte(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_byte(ptr, self, cnonBlocking) +} +func Atg_CastChar(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_char(ptr, self, cnonBlocking) +} +func Atg_CastDouble(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_double(ptr, self, cnonBlocking) +} +func Atg_CastFloat(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_float(ptr, self, cnonBlocking) +} +func Atg_CastHalf(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_half(ptr, self, cnonBlocking) +} +func Atg_CastInt(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_int(ptr, self, cnonBlocking) +} +func Atg_CastLong(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_long(ptr, self, cnonBlocking) +} +func Atg_CastShort(ptr *Ctensor, self Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__cast_short(ptr, self, cnonBlocking) +} +func Atg_Cat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) +} +func Atg_CatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +} +func Atg_CdistBackward(ptr *Ctensor, grad Ctensor, x1 Ctensor, x2 Ctensor, p float64, cdist Ctensor) { + cp := *(*C.double)(unsafe.Pointer(&p)) + C.atg__cdist_backward(ptr, grad, x1, x2, cp, cdist) +} +func Atg_CholeskyHelper(ptr *Ctensor, self Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg__cholesky_helper(ptr, self, cupper) +} +func Atg_CholeskySolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg__cholesky_solve_helper(ptr, self, a, cupper) +} +func Atg_Coalesced_(ptr *Ctensor, self Ctensor, coalesced int32) { + ccoalesced := *(*C.int)(unsafe.Pointer(&coalesced)) + C.atg__coalesced_(ptr, self, ccoalesced) +} +func Atg_Convolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) + C.atg__convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cbenchmark, cdeterministic, ccudnnEnabled) +} +func Atg_ConvolutionNogroup(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + C.atg__convolution_nogroup(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen) +} +func Atg_CopyFrom(ptr *Ctensor, self Ctensor, dst Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg__copy_from(ptr, self, dst, cnonBlocking) +} +func Atg_CtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, zeroInfinity int32) { + cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) + cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) + ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) + ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) + cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) + czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) + C.atg__ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, czeroInfinity) +} +func Atg_CtcLossBackward(ptr *Ctensor, grad Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, negLogLikelihood Ctensor, logAlpha Ctensor, blank int64, zeroInfinity int32) { + cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) + cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) + ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) + ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) + cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) + czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) + C.atg__ctc_loss_backward(ptr, grad, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, 
ctargetLengthsLen, negLogLikelihood, logAlpha, cblank, czeroInfinity) +} +func Atg_CudnnCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, deterministic int32, zeroInfinity int32) { + cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) + cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) + ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) + ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) + cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) + C.atg__cudnn_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, cdeterministic, czeroInfinity) +} +func Atg_CudnnInitDropoutState(ptr *Ctensor, dropout float64, train int32, dropoutSeed int64, optionsKind int32, optionsDevice int32) { + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cdropoutSeed := *(*C.int64_t)(unsafe.Pointer(&dropoutSeed)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg__cudnn_init_dropout_state(ptr, cdropout, ctrain, cdropoutSeed, coptionsKind, coptionsDevice) +} +func Atg_CudnnRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, weightBuf Ctensor, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor) { + cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) + cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) + cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) + cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) + C.atg__cudnn_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, weightBuf, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) +} +func Atg_CudnnRnnFlattenWeight(ptr *Ctensor, weightArrData []Ctensor, weightArrLen int, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, bidirectional int32) { + cweightArrDataPtr := (*Ctensor)(unsafe.Pointer(&weightArrData[0])) + cweightArrLen := *(*C.int)(unsafe.Pointer(&weightArrLen)) + cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) + cinputSize := *(*C.int64_t)(unsafe.Pointer(&inputSize)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + C.atg__cudnn_rnn_flatten_weight(ptr, cweightArrDataPtr, cweightArrLen, cweightStride0, 
cinputSize, cmode, chiddenSize, cnumLayers, cbatchFirst, cbidirectional) +} +func Atg_Cumprod(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__cumprod(ptr, self, cdim) +} +func Atg_CumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__cumprod_out(ptr, out, self, cdim) +} +func Atg_Cumsum(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__cumsum(ptr, self, cdim) +} +func Atg_CumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__cumsum_out(ptr, out, self, cdim) +} +func Atg_DimArange(ptr *Ctensor, like Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__dim_arange(ptr, like, cdim) +} +func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor) { + C.atg__dirichlet_grad(ptr, x, alpha, total) +} +func Atg_EmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32) { + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + csparse := *(*C.int)(unsafe.Pointer(&sparse)) + cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) + C.atg__embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) +} +func Atg_EmbeddingBagBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor) { + cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + csparse := *(*C.int)(unsafe.Pointer(&sparse)) + C.atg__embedding_bag_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, csparse, perSampleWeights) +} +func Atg_EmbeddingBagDenseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor) { + cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + C.atg__embedding_bag_dense_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) +} +func Atg_EmbeddingBagPerSampleWeightsBackward(ptr *Ctensor, grad Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, mode int64) { + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + C.atg__embedding_bag_per_sample_weights_backward(ptr, grad, weight, indices, offsets, offset2bag, cmode) +} +func Atg_EmbeddingBagSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor) { + cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + C.atg__embedding_bag_sparse_backward(ptr, grad, indices, offsets, offset2bag, bagSize, cnumWeights, 
cscaleGradByFreq, cmode, perSampleWeights) +} +func Atg_EmptyAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32, scale float64, zeroPoint int64) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + cscale := *(*C.double)(unsafe.Pointer(&scale)) + czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) + C.atg__empty_affine_quantized(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice, cscale, czeroPoint) +} +func Atg_EmptyPerChannelAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, scales Ctensor, zeroPoints Ctensor, axis int64, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg__empty_per_channel_affine_quantized(ptr, csizeDataPtr, csizeLen, scales, zeroPoints, caxis, coptionsKind, coptionsDevice) +} +func Atg_FftWithSize(ptr *Ctensor, self Ctensor, signalNdim int64, complexInput int32, complexOutput int32, inverse int32, checkedSignalSizesData []int64, checkedSignalSizesLen int, normalized int32, onesided int32, outputSizesData []int64, outputSizesLen int) { + csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) + ccomplexInput := *(*C.int)(unsafe.Pointer(&complexInput)) + ccomplexOutput := *(*C.int)(unsafe.Pointer(&complexOutput)) + cinverse := *(*C.int)(unsafe.Pointer(&inverse)) + ccheckedSignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&checkedSignalSizesData[0])) + ccheckedSignalSizesLen := *(*C.int)(unsafe.Pointer(&checkedSignalSizesLen)) + cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) + conesided := *(*C.int)(unsafe.Pointer(&onesided)) + coutputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizesData[0])) + coutputSizesLen := *(*C.int)(unsafe.Pointer(&outputSizesLen)) + C.atg__fft_with_size(ptr, self, csignalNdim, ccomplexInput, ccomplexOutput, cinverse, ccheckedSignalSizesDataPtr, ccheckedSignalSizesLen, cnormalized, conesided, coutputSizesDataPtr, coutputSizesLen) +} +func Atg_FusedDropout(ptr *Ctensor, self Ctensor, p float64) { + cp := *(*C.double)(unsafe.Pointer(&p)) + C.atg__fused_dropout(ptr, self, cp) +} +func Atg_GatherSparseBackward(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, grad Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__gather_sparse_backward(ptr, self, cdim, index, grad) +} +func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__index_copy_(ptr, self, cdim, index, source) +} +func Atg_IndexPutImpl_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32, unsafety int32) { + cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) + cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) + caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) + cunsafety := *(*C.int)(unsafe.Pointer(&unsafety)) + C.atg__index_put_impl_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate, cunsafety) +} +func Atg_Indices(ptr *Ctensor, self Ctensor) { + C.atg__indices(ptr, self) +} +func Atg_InverseHelper(ptr *Ctensor, self Ctensor) { + C.atg__inverse_helper(ptr, self) +} +func 
Atg_LogSoftmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) + C.atg__log_softmax(ptr, self, cdim, chalfToFloat) +} +func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__log_softmax_backward_data(ptr, gradOutput, output, cdim, self) +} +func Atg_LuSolveHelper(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor) { + C.atg__lu_solve_helper(ptr, self, lUData, lUPivots) +} +func Atg_LuWithInfo(ptr *Ctensor, self Ctensor, pivot int32, checkErrors int32) { + cpivot := *(*C.int)(unsafe.Pointer(&pivot)) + ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors)) + C.atg__lu_with_info(ptr, self, cpivot, ccheckErrors) +} +func Atg_MakePerChannelQuantizedTensor(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64) { + caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) + C.atg__make_per_channel_quantized_tensor(ptr, self, scale, zeroPoint, caxis) +} +func Atg_MakePerTensorQuantizedTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64) { + cscale := *(*C.double)(unsafe.Pointer(&scale)) + czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) + C.atg__make_per_tensor_quantized_tensor(ptr, self, cscale, czeroPoint) +} +func Atg_MaskedScale(ptr *Ctensor, self Ctensor, mask Ctensor, scale float64) { + cscale := *(*C.double)(unsafe.Pointer(&scale)) + C.atg__masked_scale(ptr, self, mask, cscale) +} +func Atg_Max(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg__max(ptr, self, cdim, ckeepdim) +} +func Atg_MaxOut(ptr *Ctensor, max Ctensor, maxIndices Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg__max_out(ptr, max, maxIndices, self, cdim, ckeepdim) +} +func Atg_Min(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg__min(ptr, self, cdim, ckeepdim) +} +func Atg_MinOut(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg__min_out(ptr, min, minIndices, self, cdim, ckeepdim) +} +func Atg_MkldnnReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int) { + cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) + cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) + C.atg__mkldnn_reshape(ptr, self, cshapeDataPtr, cshapeLen) +} +func Atg_MkldnnTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { + cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) + cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) + C.atg__mkldnn_transpose(ptr, self, cdim0, cdim1) +} +func Atg_MkldnnTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { + cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) + cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) + C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1) +} +func Atg_Mode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg__mode(ptr, self, cdim, ckeepdim) +} +func Atg_ModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim 
int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg__mode_out(ptr, values, indices, self, cdim, ckeepdim) +} +func Atg_MultinomialAliasDraw(ptr *Ctensor, j Ctensor, q Ctensor, numSamples int64) { + cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) + C.atg__multinomial_alias_draw(ptr, j, q, cnumSamples) +} +func Atg_MultinomialAliasSetup(ptr *Ctensor, probs Ctensor) { + C.atg__multinomial_alias_setup(ptr, probs) +} +func Atg_NnpackSpatialConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg__nnpack_spatial_convolution(ptr, input, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func Atg_NnpackSpatialConvolutionBackwardInput(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg__nnpack_spatial_convolution_backward_input(ptr, input, gradOutput, weight, cpaddingDataPtr, cpaddingLen) +} +func Atg_NnpackSpatialConvolutionBackwardWeight(ptr *Ctensor, input Ctensor, weightsizeData []int64, weightsizeLen int, gradOutput Ctensor, paddingData []int64, paddingLen int) { + cweightsizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightsizeData[0])) + cweightsizeLen := *(*C.int)(unsafe.Pointer(&weightsizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen) +} +func Atg_PackPaddedSequence(ptr *Ctensor, input Ctensor, lengths Ctensor, batchFirst int32) { + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + C.atg__pack_padded_sequence(ptr, input, lengths, cbatchFirst) +} +func Atg_PackPaddedSequenceBackward(ptr *Ctensor, grad Ctensor, inputSizeData []int64, inputSizeLen int, batchSizes Ctensor, batchFirst int32) { + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + C.atg__pack_padded_sequence_backward(ptr, grad, cinputSizeDataPtr, cinputSizeLen, batchSizes, cbatchFirst) +} +func Atg_PadPackedSequence(ptr *Ctensor, data Ctensor, batchSizes Ctensor, batchFirst int32, paddingValue Cscalar, totalLength int64) { + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + ctotalLength := *(*C.int64_t)(unsafe.Pointer(&totalLength)) + C.atg__pad_packed_sequence(ptr, data, batchSizes, cbatchFirst, paddingValue, ctotalLength) +} +func Atg_PdistBackward(ptr *Ctensor, grad Ctensor, self Ctensor, p float64, pdist Ctensor) { + cp := *(*C.double)(unsafe.Pointer(&p)) + C.atg__pdist_backward(ptr, grad, self, cp, pdist) +} +func Atg_QrHelper(ptr *Ctensor, self Ctensor, some int32) { + csome := *(*C.int)(unsafe.Pointer(&some)) + C.atg__qr_helper(ptr, self, csome) +} +func Atg_ReshapeFromTensor(ptr *Ctensor, self Ctensor, shape Ctensor) { + C.atg__reshape_from_tensor(ptr, self, shape) +} +func Atg_SWhere(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor) { + 
C.atg__s_where(ptr, condition, self, other) +} +func Atg_SampleDirichlet(ptr *Ctensor, self Ctensor) { + C.atg__sample_dirichlet(ptr, self) +} +func Atg_ShapeAsTensor(ptr *Ctensor, self Ctensor) { + C.atg__shape_as_tensor(ptr, self) +} +func Atg_SobolEngineDraw(ptr *Ctensor, quasi Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64, dtype int32) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) + cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg__sobol_engine_draw(ptr, quasi, cn, sobolstate, cdimension, cnumGenerated, cdtype) +} +func Atg_SobolEngineFf_(ptr *Ctensor, self Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) + cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) + C.atg__sobol_engine_ff_(ptr, self, cn, sobolstate, cdimension, cnumGenerated) +} +func Atg_SobolEngineInitializeState_(ptr *Ctensor, self Ctensor, dimension int64) { + cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) + C.atg__sobol_engine_initialize_state_(ptr, self, cdimension) +} +func Atg_SobolEngineScramble_(ptr *Ctensor, self Ctensor, ltm Ctensor, dimension int64) { + cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) + C.atg__sobol_engine_scramble_(ptr, self, ltm, cdimension) +} +func Atg_Softmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) + C.atg__softmax(ptr, self, cdim, chalfToFloat) +} +func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, self) +} +func Atg_SolveHelper(ptr *Ctensor, self Ctensor, a Ctensor) { + C.atg__solve_helper(ptr, self, a) +} +func Atg_SparseAddmm(ptr *Ctensor, self Ctensor, sparse Ctensor, dense Ctensor) { + C.atg__sparse_addmm(ptr, self, sparse, dense) +} +func Atg_SparseCooTensorUnsafe(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg__sparse_coo_tensor_unsafe(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func Atg_SparseCooTensorWithDims(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) + cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg__sparse_coo_tensor_with_dims(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func Atg_SparseCooTensorWithDimsAndTensors(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32) { + csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) + cdenseDim := 
*(*C.int64_t)(unsafe.Pointer(&denseDim)) + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg__sparse_coo_tensor_with_dims_and_tensors(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, indices, values, coptionsKind, coptionsDevice) +} +func Atg_SparseMm(ptr *Ctensor, sparse Ctensor, dense Ctensor) { + C.atg__sparse_mm(ptr, sparse, dense) +} +func Atg_SparseSum(ptr *Ctensor, self Ctensor) { + C.atg__sparse_sum(ptr, self) +} +func Atg_SparseSum1(ptr *Ctensor, self Ctensor, dtype int32) { + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg__sparse_sum1(ptr, self, cdtype) +} +func Atg_SparseSum2(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + C.atg__sparse_sum2(ptr, self, cdimDataPtr, cdimLen) +} +func Atg_SparseSum3(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, dtype int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg__sparse_sum3(ptr, self, cdimDataPtr, cdimLen, cdtype) +} +func Atg_SparseSumBackward(ptr *Ctensor, grad Ctensor, self Ctensor, dimData []int64, dimLen int) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + C.atg__sparse_sum_backward(ptr, grad, self, cdimDataPtr, cdimLen) +} +func Atg_StandardGamma(ptr *Ctensor, self Ctensor) { + C.atg__standard_gamma(ptr, self) +} +func Atg_StandardGammaGrad(ptr *Ctensor, self Ctensor, output Ctensor) { + C.atg__standard_gamma_grad(ptr, self, output) +} +func Atg_Std(ptr *Ctensor, self Ctensor, unbiased int32) { + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + C.atg__std(ptr, self, cunbiased) +} +func Atg_SvdHelper(ptr *Ctensor, self Ctensor, some int32, computeUv int32) { + csome := *(*C.int)(unsafe.Pointer(&some)) + ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) + C.atg__svd_helper(ptr, self, csome, ccomputeUv) +} +func Atg_SymeigHelper(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32) { + ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg__symeig_helper(ptr, self, ceigenvectors, cupper) +} +func Atg_TriangularSolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) + cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) + C.atg__triangular_solve_helper(ptr, self, a, cupper, ctranspose, cunitriangular) +} +func Atg_Trilinear(ptr *Ctensor, i1 Ctensor, i2 Ctensor, i3 Ctensor, expand1Data []int64, expand1Len int, expand2Data []int64, expand2Len int, expand3Data []int64, expand3Len int, sumdimData []int64, sumdimLen int, unrollDim int64) { + cexpand1DataPtr := (*C.int64_t)(unsafe.Pointer(&expand1Data[0])) + cexpand1Len := *(*C.int)(unsafe.Pointer(&expand1Len)) + cexpand2DataPtr := (*C.int64_t)(unsafe.Pointer(&expand2Data[0])) + cexpand2Len := *(*C.int)(unsafe.Pointer(&expand2Len)) + cexpand3DataPtr := (*C.int64_t)(unsafe.Pointer(&expand3Data[0])) + cexpand3Len := *(*C.int)(unsafe.Pointer(&expand3Len)) + csumdimDataPtr := (*C.int64_t)(unsafe.Pointer(&sumdimData[0])) + csumdimLen := *(*C.int)(unsafe.Pointer(&sumdimLen)) + 
cunrollDim := *(*C.int64_t)(unsafe.Pointer(&unrollDim)) + C.atg__trilinear(ptr, i1, i2, i3, cexpand1DataPtr, cexpand1Len, cexpand2DataPtr, cexpand2Len, cexpand3DataPtr, cexpand3Len, csumdimDataPtr, csumdimLen, cunrollDim) +} +func Atg_Unique(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32) { + csorted := *(*C.int)(unsafe.Pointer(&sorted)) + creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) + C.atg__unique(ptr, self, csorted, creturnInverse) +} +func Atg_Unique2(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32, returnCounts int32) { + csorted := *(*C.int)(unsafe.Pointer(&sorted)) + creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) + creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) + C.atg__unique2(ptr, self, csorted, creturnInverse, creturnCounts) +} +func Atg_UnsafeView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen) +} +func Atg_Values(ptr *Ctensor, self Ctensor) { + C.atg__values(ptr, self) +} +func Atg_Var(ptr *Ctensor, self Ctensor, unbiased int32) { + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + C.atg__var(ptr, self, cunbiased) +} +func Atg_WeightNorm(ptr *Ctensor, v Ctensor, g Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__weight_norm(ptr, v, g, cdim) +} +func Atg_WeightNormCudaInterface(ptr *Ctensor, v Ctensor, g Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__weight_norm_cuda_interface(ptr, v, g, cdim) +} +func Atg_WeightNormCudaInterfaceBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__weight_norm_cuda_interface_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) +} +func Atg_WeightNormDifferentiableBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg__weight_norm_differentiable_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) +} +func AtgAbs(ptr *Ctensor, self Ctensor) { + C.atg_abs(ptr, self) +} +func AtgAbs_(ptr *Ctensor, self Ctensor) { + C.atg_abs_(ptr, self) +} +func AtgAbsOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_abs_out(ptr, out, self) +} +func AtgAcos(ptr *Ctensor, self Ctensor) { + C.atg_acos(ptr, self) +} +func AtgAcos_(ptr *Ctensor, self Ctensor) { + C.atg_acos_(ptr, self) +} +func AtgAcosOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_acos_out(ptr, out, self) } -func AtgAdaptiveAvgPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_avg_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveAvgPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_avg_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_avg_pool2d(ptr, self, 
coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_avg_pool2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_avg_pool2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveAvgPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_avg_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveAvgPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_avg_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ -C.atg_adaptive_avg_pool3d_backward(ptr, gradOutput, self) +func AtgAdaptiveAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor) { + C.atg_adaptive_avg_pool3d_backward(ptr, gradOutput, self) } -func AtgAdaptiveAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor){ -C.atg_adaptive_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self) +func AtgAdaptiveAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor) { + C.atg_adaptive_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self) } -func AtgAdaptiveAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_avg_pool3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_avg_pool3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_max_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveMaxPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_max_pool1d(ptr, self, 
coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_max_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveMaxPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_max_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ -C.atg_adaptive_max_pool2d_backward(ptr, gradOutput, self, indices) +func AtgAdaptiveMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { + C.atg_adaptive_max_pool2d_backward(ptr, gradOutput, self, indices) } -func AtgAdaptiveMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ -C.atg_adaptive_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, indices) +func AtgAdaptiveMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { + C.atg_adaptive_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, indices) } -func AtgAdaptiveMaxPool2dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_max_pool2d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveMaxPool2dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_max_pool2d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_max_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveMaxPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_max_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ -C.atg_adaptive_max_pool3d_backward(ptr, gradOutput, self, indices) +func AtgAdaptiveMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { + C.atg_adaptive_max_pool3d_backward(ptr, gradOutput, self, indices) } -func AtgAdaptiveMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ -C.atg_adaptive_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, indices) +func AtgAdaptiveMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { + C.atg_adaptive_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, indices) } -func AtgAdaptiveMaxPool3dOut(ptr *Ctensor, 
out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_adaptive_max_pool3d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveMaxPool3dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_adaptive_max_pool3d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdd(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_add(ptr, self, other) +func AtgAdd(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_add(ptr, self, other) } -func AtgAdd1(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_add1(ptr, self, other ) +func AtgAdd1(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_add1(ptr, self, other) } -func AtgAdd_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_add_(ptr, self, other) +func AtgAdd_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_add_(ptr, self, other) } -func AtgAdd1_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_add_1(ptr, self, other ) +func AtgAdd1_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_add_1(ptr, self, other) } -func AtgAddOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_add_out(ptr, out, self, other) +func AtgAddOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_add_out(ptr, out, self, other) } -func AtgAddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ -C.atg_addbmm(ptr, self, batch1, batch2) +func AtgAddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { + C.atg_addbmm(ptr, self, batch1, batch2) } -func AtgAddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ -C.atg_addbmm_(ptr, self, batch1, batch2) +func AtgAddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { + C.atg_addbmm_(ptr, self, batch1, batch2) } -func AtgAddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ -C.atg_addbmm_out(ptr, out, self, batch1, batch2) +func AtgAddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { + C.atg_addbmm_out(ptr, out, self, batch1, batch2) } -func AtgAddcdiv(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ -C.atg_addcdiv(ptr, self, tensor1, tensor2) +func AtgAddcdiv(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { + C.atg_addcdiv(ptr, self, tensor1, tensor2) } -func AtgAddcdiv_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ -C.atg_addcdiv_(ptr, self, tensor1, tensor2) +func AtgAddcdiv_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { + C.atg_addcdiv_(ptr, self, tensor1, tensor2) } -func AtgAddcdivOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ -C.atg_addcdiv_out(ptr, out, self, tensor1, tensor2) +func AtgAddcdivOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { + C.atg_addcdiv_out(ptr, out, self, tensor1, tensor2) } -func AtgAddcmul(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ -C.atg_addcmul(ptr, self, tensor1, tensor2) +func AtgAddcmul(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { + C.atg_addcmul(ptr, self, tensor1, tensor2) } -func AtgAddcmul_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ 
-C.atg_addcmul_(ptr, self, tensor1, tensor2) +func AtgAddcmul_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { + C.atg_addcmul_(ptr, self, tensor1, tensor2) } -func AtgAddcmulOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ -C.atg_addcmul_out(ptr, out, self, tensor1, tensor2) +func AtgAddcmulOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { + C.atg_addcmul_out(ptr, out, self, tensor1, tensor2) } -func AtgAddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ -C.atg_addmm(ptr, self, mat1, mat2) +func AtgAddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { + C.atg_addmm(ptr, self, mat1, mat2) } -func AtgAddmm_(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ -C.atg_addmm_(ptr, self, mat1, mat2) +func AtgAddmm_(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { + C.atg_addmm_(ptr, self, mat1, mat2) } -func AtgAddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ -C.atg_addmm_out(ptr, out, self, mat1, mat2) +func AtgAddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { + C.atg_addmm_out(ptr, out, self, mat1, mat2) } -func AtgAddmv(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ -C.atg_addmv(ptr, self, mat, vec) +func AtgAddmv(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor) { + C.atg_addmv(ptr, self, mat, vec) } -func AtgAddmv_(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ -C.atg_addmv_(ptr, self, mat, vec) +func AtgAddmv_(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor) { + C.atg_addmv_(ptr, self, mat, vec) } -func AtgAddmvOut(ptr *Ctensor, out Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ -C.atg_addmv_out(ptr, out, self, mat, vec) +func AtgAddmvOut(ptr *Ctensor, out Ctensor, self Ctensor, mat Ctensor, vec Ctensor) { + C.atg_addmv_out(ptr, out, self, mat, vec) } -func AtgAddr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ -C.atg_addr(ptr, self, vec1, vec2) +func AtgAddr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { + C.atg_addr(ptr, self, vec1, vec2) } -func AtgAddr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ -C.atg_addr_(ptr, self, vec1, vec2) +func AtgAddr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { + C.atg_addr_(ptr, self, vec1, vec2) } -func AtgAddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ -C.atg_addr_out(ptr, out, self, vec1, vec2) +func AtgAddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { + C.atg_addr_out(ptr, out, self, vec1, vec2) } -func AtgAffineGridGenerator(ptr *Ctensor, theta Ctensor, sizeData []int64, sizeLen int, alignCorners int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -C.atg_affine_grid_generator(ptr, theta, csizeDataPtr, csizeLen, calignCorners) +func AtgAffineGridGenerator(ptr *Ctensor, theta Ctensor, sizeData []int64, sizeLen int, alignCorners int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + C.atg_affine_grid_generator(ptr, theta, csizeDataPtr, csizeLen, calignCorners) } -func AtgAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, sizeData []int64, sizeLen int, alignCorners int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -calignCorners := 
*(*C.int)(unsafe.Pointer(&alignCorners)) -C.atg_affine_grid_generator_backward(ptr, grad, csizeDataPtr, csizeLen, calignCorners) +func AtgAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, sizeData []int64, sizeLen int, alignCorners int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + C.atg_affine_grid_generator_backward(ptr, grad, csizeDataPtr, csizeLen, calignCorners) } -func AtgAlias(ptr *Ctensor, self Ctensor){ -C.atg_alias(ptr, self) +func AtgAlias(ptr *Ctensor, self Ctensor) { + C.atg_alias(ptr, self) } -func AtgAlignAs(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_align_as(ptr, self, other) +func AtgAlignAs(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_align_as(ptr, self, other) } -func AtgAll(ptr *Ctensor, self Ctensor){ -C.atg_all(ptr, self) -} -func AtgAll1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_all1(ptr, self, cdim, ckeepdim) -} -func AtgAllOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_all_out(ptr, out, self, cdim, ckeepdim) -} -func AtgAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_alpha_dropout(ptr, input, cp, ctrain) -} -func AtgAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_alpha_dropout_(ptr, self, cp, ctrain) -} -func AtgAngle(ptr *Ctensor, self Ctensor){ -C.atg_angle(ptr, self) -} -func AtgAngleOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_angle_out(ptr, out, self) -} -func AtgAny(ptr *Ctensor, self Ctensor){ -C.atg_any(ptr, self) -} -func AtgAny1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_any1(ptr, self, cdim, ckeepdim) -} -func AtgAnyOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_any_out(ptr, out, self, cdim, ckeepdim) -} -func AtgArange(ptr *Ctensor, end Cscalar, optionsKind int32, optionsDevice int32){ -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_arange(ptr, end , coptionsKind, coptionsDevice) -} -func AtgArange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_arange1(ptr, start , end , coptionsKind, coptionsDevice) -} -func AtgArange2(ptr *Ctensor, start Cscalar, end Cscalar, step Cscalar, optionsKind int32, optionsDevice int32){ -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_arange2(ptr, start , end , step , coptionsKind, coptionsDevice) -} -func AtgArangeOut(ptr *Ctensor, out Ctensor, end Cscalar){ -C.atg_arange_out(ptr, out, end ) -} -func AtgArangeOut1(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){ -C.atg_arange_out1(ptr, out, start , end ) -} -func AtgArgmax(ptr *Ctensor, self Ctensor, dim int64, 
keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_argmax(ptr, self, cdim, ckeepdim) -} -func AtgArgmin(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_argmin(ptr, self, cdim, ckeepdim) -} -func AtgArgsort(ptr *Ctensor, self Ctensor, dim int64, descending int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdescending := *(*C.int)(unsafe.Pointer(&descending)) -C.atg_argsort(ptr, self, cdim, cdescending) -} -func AtgAsStrided(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset)) -C.atg_as_strided(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) -} -func AtgAsStrided_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset)) -C.atg_as_strided_(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) -} -func AtgAsin(ptr *Ctensor, self Ctensor){ -C.atg_asin(ptr, self) -} -func AtgAsin_(ptr *Ctensor, self Ctensor){ -C.atg_asin_(ptr, self) -} -func AtgAsinOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_asin_out(ptr, out, self) -} -func AtgAtan(ptr *Ctensor, self Ctensor){ -C.atg_atan(ptr, self) -} -func AtgAtan2(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_atan2(ptr, self, other) -} -func AtgAtan2_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_atan2_(ptr, self, other) -} -func AtgAtan2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_atan2_out(ptr, out, self, other) -} -func AtgAtan_(ptr *Ctensor, self Ctensor){ -C.atg_atan_(ptr, self) -} -func AtgAtanOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_atan_out(ptr, out, self) -} -func AtgAvgPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -C.atg_avg_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad) -} -func AtgAvgPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) 
-ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool2d_out(ptr, out, self, ckernelSizeDataPtr, 
ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) -cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) -C.atg_avg_pool3d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgBaddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ -C.atg_baddbmm(ptr, self, batch1, batch2) -} -func AtgBaddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ -C.atg_baddbmm_(ptr, self, batch1, batch2) -} -func AtgBaddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ -C.atg_baddbmm_out(ptr, out, self, batch1, batch2) -} -func AtgBartlettWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_bartlett_window(ptr, cwindowLength, coptionsKind, coptionsDevice) -} -func AtgBartlettWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_bartlett_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) -} -func AtgBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64, cudnnEnabled int32){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) -C.atg_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps, ccudnnEnabled) -} -func AtgBatchNormBackwardElemt(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, meanDy Ctensor, meanDyXmu Ctensor){ -C.atg_batch_norm_backward_elemt(ptr, gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) -} -func AtgBatchNormBackwardReduce(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, inputG int32, weightG int32, biasG int32){ -cinputG := *(*C.int)(unsafe.Pointer(&inputG)) -cweightG := *(*C.int)(unsafe.Pointer(&weightG)) -cbiasG := *(*C.int)(unsafe.Pointer(&biasG)) -C.atg_batch_norm_backward_reduce(ptr, gradOut, input, mean, invstd, weight, cinputG, cweightG, cbiasG) -} -func AtgBatchNormElemt(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){ -ceps := *(*C.double)(unsafe.Pointer(&eps)) -C.atg_batch_norm_elemt(ptr, input, weight, bias, mean, invstd, ceps) -} -func AtgBatchNormElemtOut(ptr *Ctensor, out Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){ -ceps := *(*C.double)(unsafe.Pointer(&eps)) -C.atg_batch_norm_elemt_out(ptr, out, input, weight, bias, mean, invstd, ceps) -} -func AtgBatchNormGatherStats(ptr *Ctensor, input Ctensor, mean Ctensor, invstd 
Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, count int64){ -cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -ccount := *(*C.int64_t)(unsafe.Pointer(&count)) -C.atg_batch_norm_gather_stats(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccount) -} -func AtgBatchNormGatherStatsWithCounts(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, countsData []int64, countsLen int){ -cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -ccountsDataPtr := (*C.int64_t)(unsafe.Pointer(&countsData[0])) -ccountsLen := *(*C.int)(unsafe.Pointer(&countsLen)) -C.atg_batch_norm_gather_stats_with_counts(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccountsDataPtr, ccountsLen) -} -func AtgBatchNormStats(ptr *Ctensor, input Ctensor, eps float64){ -ceps := *(*C.double)(unsafe.Pointer(&eps)) -C.atg_batch_norm_stats(ptr, input, ceps) -} -func AtgBatchNormUpdateStats(ptr *Ctensor, input Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64){ -cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) -C.atg_batch_norm_update_stats(ptr, input, runningMean, runningVar, cmomentum) -} -func AtgBernoulli(ptr *Ctensor, self Ctensor){ -C.atg_bernoulli(ptr, self) -} -func AtgBernoulli1(ptr *Ctensor, self Ctensor, p float64){ -cp := *(*C.double)(unsafe.Pointer(&p)) -C.atg_bernoulli1(ptr, self, cp) -} -func AtgBernoulli_(ptr *Ctensor, self Ctensor, p Ctensor){ -C.atg_bernoulli_(ptr, self, p) -} -func AtgBernoulli1_(ptr *Ctensor, self Ctensor, p float64){ -cp := *(*C.double)(unsafe.Pointer(&p)) -C.atg_bernoulli_1(ptr, self, cp) +func AtgAll(ptr *Ctensor, self Ctensor) { + C.atg_all(ptr, self) +} +func AtgAll1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_all1(ptr, self, cdim, ckeepdim) +} +func AtgAllOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_all_out(ptr, out, self, cdim, ckeepdim) +} +func AtgAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_alpha_dropout(ptr, input, cp, ctrain) +} +func AtgAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_alpha_dropout_(ptr, self, cp, ctrain) +} +func AtgAngle(ptr *Ctensor, self Ctensor) { + C.atg_angle(ptr, self) +} +func AtgAngleOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_angle_out(ptr, out, self) +} +func AtgAny(ptr *Ctensor, self Ctensor) { + C.atg_any(ptr, self) +} +func AtgAny1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_any1(ptr, self, cdim, ckeepdim) +} +func AtgAnyOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_any_out(ptr, out, self, cdim, ckeepdim) +} +func AtgArange(ptr *Ctensor, end Cscalar, optionsKind int32, optionsDevice int32) { + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := 
*(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_arange(ptr, end, coptionsKind, coptionsDevice) +} +func AtgArange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32) { + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_arange1(ptr, start, end, coptionsKind, coptionsDevice) +} +func AtgArange2(ptr *Ctensor, start Cscalar, end Cscalar, step Cscalar, optionsKind int32, optionsDevice int32) { + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_arange2(ptr, start, end, step, coptionsKind, coptionsDevice) +} +func AtgArangeOut(ptr *Ctensor, out Ctensor, end Cscalar) { + C.atg_arange_out(ptr, out, end) +} +func AtgArangeOut1(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar) { + C.atg_arange_out1(ptr, out, start, end) +} +func AtgArgmax(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_argmax(ptr, self, cdim, ckeepdim) +} +func AtgArgmin(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_argmin(ptr, self, cdim, ckeepdim) +} +func AtgArgsort(ptr *Ctensor, self Ctensor, dim int64, descending int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdescending := *(*C.int)(unsafe.Pointer(&descending)) + C.atg_argsort(ptr, self, cdim, cdescending) +} +func AtgAsStrided(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset)) + C.atg_as_strided(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) +} +func AtgAsStrided_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset)) + C.atg_as_strided_(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) +} +func AtgAsin(ptr *Ctensor, self Ctensor) { + C.atg_asin(ptr, self) +} +func AtgAsin_(ptr *Ctensor, self Ctensor) { + C.atg_asin_(ptr, self) +} +func AtgAsinOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_asin_out(ptr, out, self) +} +func AtgAtan(ptr *Ctensor, self Ctensor) { + C.atg_atan(ptr, self) +} +func AtgAtan2(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_atan2(ptr, self, other) +} +func AtgAtan2_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_atan2_(ptr, self, other) +} +func AtgAtan2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_atan2_out(ptr, out, self, other) +} +func AtgAtan_(ptr *Ctensor, self Ctensor) { + C.atg_atan_(ptr, self) +} +func AtgAtanOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_atan_out(ptr, out, self) +} +func AtgAvgPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, 
paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + C.atg_avg_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad) +} +func AtgAvgPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + C.atg_avg_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + C.atg_avg_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + 
C.atg_avg_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + C.atg_avg_pool2d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + C.atg_avg_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + C.atg_avg_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := 
*(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + C.atg_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) + cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) + C.atg_avg_pool3d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgBaddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { + C.atg_baddbmm(ptr, self, batch1, batch2) +} +func AtgBaddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { + C.atg_baddbmm_(ptr, self, batch1, batch2) +} +func AtgBaddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { + C.atg_baddbmm_out(ptr, out, self, batch1, batch2) +} +func AtgBartlettWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_bartlett_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgBartlettWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_bartlett_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64, cudnnEnabled int32) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) + C.atg_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps, ccudnnEnabled) +} +func AtgBatchNormBackwardElemt(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, meanDy Ctensor, meanDyXmu Ctensor) { 
+ C.atg_batch_norm_backward_elemt(ptr, gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) +} +func AtgBatchNormBackwardReduce(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, inputG int32, weightG int32, biasG int32) { + cinputG := *(*C.int)(unsafe.Pointer(&inputG)) + cweightG := *(*C.int)(unsafe.Pointer(&weightG)) + cbiasG := *(*C.int)(unsafe.Pointer(&biasG)) + C.atg_batch_norm_backward_reduce(ptr, gradOut, input, mean, invstd, weight, cinputG, cweightG, cbiasG) +} +func AtgBatchNormElemt(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64) { + ceps := *(*C.double)(unsafe.Pointer(&eps)) + C.atg_batch_norm_elemt(ptr, input, weight, bias, mean, invstd, ceps) +} +func AtgBatchNormElemtOut(ptr *Ctensor, out Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64) { + ceps := *(*C.double)(unsafe.Pointer(&eps)) + C.atg_batch_norm_elemt_out(ptr, out, input, weight, bias, mean, invstd, ceps) +} +func AtgBatchNormGatherStats(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, count int64) { + cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + ccount := *(*C.int64_t)(unsafe.Pointer(&count)) + C.atg_batch_norm_gather_stats(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccount) +} +func AtgBatchNormGatherStatsWithCounts(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, countsData []int64, countsLen int) { + cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + ccountsDataPtr := (*C.int64_t)(unsafe.Pointer(&countsData[0])) + ccountsLen := *(*C.int)(unsafe.Pointer(&countsLen)) + C.atg_batch_norm_gather_stats_with_counts(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccountsDataPtr, ccountsLen) +} +func AtgBatchNormStats(ptr *Ctensor, input Ctensor, eps float64) { + ceps := *(*C.double)(unsafe.Pointer(&eps)) + C.atg_batch_norm_stats(ptr, input, ceps) +} +func AtgBatchNormUpdateStats(ptr *Ctensor, input Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64) { + cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) + C.atg_batch_norm_update_stats(ptr, input, runningMean, runningVar, cmomentum) +} +func AtgBernoulli(ptr *Ctensor, self Ctensor) { + C.atg_bernoulli(ptr, self) +} +func AtgBernoulli1(ptr *Ctensor, self Ctensor, p float64) { + cp := *(*C.double)(unsafe.Pointer(&p)) + C.atg_bernoulli1(ptr, self, cp) +} +func AtgBernoulli_(ptr *Ctensor, self Ctensor, p Ctensor) { + C.atg_bernoulli_(ptr, self, p) +} +func AtgBernoulli1_(ptr *Ctensor, self Ctensor, p float64) { + cp := *(*C.double)(unsafe.Pointer(&p)) + C.atg_bernoulli_1(ptr, self, cp) } -func AtgBernoulliOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_bernoulli_out(ptr, out, self) +func AtgBernoulliOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_bernoulli_out(ptr, out, self) } -func AtgBilinear(ptr *Ctensor, input1 Ctensor, input2 Ctensor, weight Ctensor, bias Ctensor){ -C.atg_bilinear(ptr, input1, input2, weight, bias) +func AtgBilinear(ptr *Ctensor, input1 Ctensor, input2 Ctensor, weight Ctensor, bias Ctensor) { + C.atg_bilinear(ptr, input1, input2, weight, bias) } -func AtgBinaryCrossEntropy(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ -creduction := 
*(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_binary_cross_entropy(ptr, self, target, weight, creduction) +func AtgBinaryCrossEntropy(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_binary_cross_entropy(ptr, self, target, weight, creduction) } -func AtgBinaryCrossEntropyBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_binary_cross_entropy_backward(ptr, gradOutput, self, target, weight, creduction) +func AtgBinaryCrossEntropyBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_binary_cross_entropy_backward(ptr, gradOutput, self, target, weight, creduction) } -func AtgBinaryCrossEntropyBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_binary_cross_entropy_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction) +func AtgBinaryCrossEntropyBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_binary_cross_entropy_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction) } -func AtgBinaryCrossEntropyOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_binary_cross_entropy_out(ptr, out, self, target, weight, creduction) +func AtgBinaryCrossEntropyOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_binary_cross_entropy_out(ptr, out, self, target, weight, creduction) } -func AtgBinaryCrossEntropyWithLogits(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_binary_cross_entropy_with_logits(ptr, self, target, weight, posWeight, creduction) +func AtgBinaryCrossEntropyWithLogits(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_binary_cross_entropy_with_logits(ptr, self, target, weight, posWeight, creduction) } -func AtgBinaryCrossEntropyWithLogitsBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_binary_cross_entropy_with_logits_backward(ptr, gradOutput, self, target, weight, posWeight, creduction) +func AtgBinaryCrossEntropyWithLogitsBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_binary_cross_entropy_with_logits_backward(ptr, gradOutput, self, target, weight, posWeight, creduction) } -func AtgBincount(ptr *Ctensor, self Ctensor, weights Ctensor, minlength int64){ -cminlength := *(*C.int64_t)(unsafe.Pointer(&minlength)) -C.atg_bincount(ptr, self, weights, cminlength) +func AtgBincount(ptr *Ctensor, self Ctensor, weights Ctensor, minlength 
int64) { + cminlength := *(*C.int64_t)(unsafe.Pointer(&minlength)) + C.atg_bincount(ptr, self, weights, cminlength) } -func AtgBitwiseAnd(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_and(ptr, self, other ) +func AtgBitwiseAnd(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_and(ptr, self, other) } -func AtgBitwiseAnd1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_and1(ptr, self, other) +func AtgBitwiseAnd1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_and1(ptr, self, other) } -func AtgBitwiseAnd_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_and_(ptr, self, other ) +func AtgBitwiseAnd_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_and_(ptr, self, other) } -func AtgBitwiseAnd1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_and_1(ptr, self, other) +func AtgBitwiseAnd1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_and_1(ptr, self, other) } -func AtgBitwiseAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_and_out(ptr, out, self, other) +func AtgBitwiseAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_and_out(ptr, out, self, other) } -func AtgBitwiseAndOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_and_out1(ptr, out, self, other ) +func AtgBitwiseAndOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_and_out1(ptr, out, self, other) } -func AtgBitwiseNot(ptr *Ctensor, self Ctensor){ -C.atg_bitwise_not(ptr, self) +func AtgBitwiseNot(ptr *Ctensor, self Ctensor) { + C.atg_bitwise_not(ptr, self) } -func AtgBitwiseNot_(ptr *Ctensor, self Ctensor){ -C.atg_bitwise_not_(ptr, self) +func AtgBitwiseNot_(ptr *Ctensor, self Ctensor) { + C.atg_bitwise_not_(ptr, self) } -func AtgBitwiseNotOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_bitwise_not_out(ptr, out, self) +func AtgBitwiseNotOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_bitwise_not_out(ptr, out, self) } -func AtgBitwiseOr(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_or(ptr, self, other ) +func AtgBitwiseOr(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_or(ptr, self, other) } -func AtgBitwiseOr1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_or1(ptr, self, other) +func AtgBitwiseOr1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_or1(ptr, self, other) } -func AtgBitwiseOr_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_or_(ptr, self, other ) +func AtgBitwiseOr_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_or_(ptr, self, other) } -func AtgBitwiseOr1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_or_1(ptr, self, other) +func AtgBitwiseOr1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_or_1(ptr, self, other) } -func AtgBitwiseOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_or_out(ptr, out, self, other) +func AtgBitwiseOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_or_out(ptr, out, self, other) } -func AtgBitwiseOrOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_or_out1(ptr, out, self, other ) +func AtgBitwiseOrOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_or_out1(ptr, out, self, other) } -func AtgBitwiseXor(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_xor(ptr, self, other ) +func AtgBitwiseXor(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_xor(ptr, self, 
other) } -func AtgBitwiseXor1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_xor1(ptr, self, other) +func AtgBitwiseXor1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_xor1(ptr, self, other) } -func AtgBitwiseXor_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_xor_(ptr, self, other ) +func AtgBitwiseXor_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_xor_(ptr, self, other) } -func AtgBitwiseXor1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_xor_1(ptr, self, other) +func AtgBitwiseXor1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_xor_1(ptr, self, other) } -func AtgBitwiseXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_bitwise_xor_out(ptr, out, self, other) +func AtgBitwiseXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_bitwise_xor_out(ptr, out, self, other) } -func AtgBitwiseXorOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_bitwise_xor_out1(ptr, out, self, other ) +func AtgBitwiseXorOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_bitwise_xor_out1(ptr, out, self, other) } -func AtgBlackmanWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_blackman_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +func AtgBlackmanWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_blackman_window(ptr, cwindowLength, coptionsKind, coptionsDevice) } -func AtgBlackmanWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_blackman_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +func AtgBlackmanWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_blackman_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) } -func AtgBmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ -C.atg_bmm(ptr, self, mat2) +func AtgBmm(ptr *Ctensor, self Ctensor, mat2 Ctensor) { + C.atg_bmm(ptr, self, mat2) } -func AtgBmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){ -C.atg_bmm_out(ptr, out, self, mat2) +func AtgBmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor) { + C.atg_bmm_out(ptr, out, self, mat2) } -func AtgCartesianProd(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){ -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -C.atg_cartesian_prod(ptr, ctensorsDataPtr, ctensorsLen) +func AtgCartesianProd(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int) { + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := 
*(*C.int)(unsafe.Pointer(&tensorsLen)) + C.atg_cartesian_prod(ptr, ctensorsDataPtr, ctensorsLen) } -func AtgCat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) +func AtgCat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) } -func AtgCatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +func AtgCatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) } -func AtgCauchy_(ptr *Ctensor, self Ctensor, median float64, sigma float64){ -cmedian := *(*C.double)(unsafe.Pointer(&median)) -csigma := *(*C.double)(unsafe.Pointer(&sigma)) -C.atg_cauchy_(ptr, self, cmedian, csigma) +func AtgCauchy_(ptr *Ctensor, self Ctensor, median float64, sigma float64) { + cmedian := *(*C.double)(unsafe.Pointer(&median)) + csigma := *(*C.double)(unsafe.Pointer(&sigma)) + C.atg_cauchy_(ptr, self, cmedian, csigma) } -func AtgCdist(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, computeMode int64){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ccomputeMode := *(*C.int64_t)(unsafe.Pointer(&computeMode)) -C.atg_cdist(ptr, x1, x2, cp, ccomputeMode) +func AtgCdist(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, computeMode int64) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ccomputeMode := *(*C.int64_t)(unsafe.Pointer(&computeMode)) + C.atg_cdist(ptr, x1, x2, cp, ccomputeMode) } -func AtgCeil(ptr *Ctensor, self Ctensor){ -C.atg_ceil(ptr, self) +func AtgCeil(ptr *Ctensor, self Ctensor) { + C.atg_ceil(ptr, self) } -func AtgCeil_(ptr *Ctensor, self Ctensor){ -C.atg_ceil_(ptr, self) +func AtgCeil_(ptr *Ctensor, self Ctensor) { + C.atg_ceil_(ptr, self) } -func AtgCeilOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_ceil_out(ptr, out, self) +func AtgCeilOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_ceil_out(ptr, out, self) } -func AtgCelu(ptr *Ctensor, self Ctensor){ -C.atg_celu(ptr, self) +func AtgCelu(ptr *Ctensor, self Ctensor) { + C.atg_celu(ptr, self) } -func AtgCelu_(ptr *Ctensor, self Ctensor){ -C.atg_celu_(ptr, self) +func AtgCelu_(ptr *Ctensor, self Ctensor) { + C.atg_celu_(ptr, self) } -func AtgChainMatmul(ptr *Ctensor, matricesData []Ctensor, matricesLen int){ -cmatricesDataPtr := (*Ctensor)(unsafe.Pointer(&matricesData[0])) -cmatricesLen := *(*C.int)(unsafe.Pointer(&matricesLen)) -C.atg_chain_matmul(ptr, cmatricesDataPtr, cmatricesLen) +func AtgChainMatmul(ptr *Ctensor, matricesData []Ctensor, matricesLen int) { + cmatricesDataPtr := (*Ctensor)(unsafe.Pointer(&matricesData[0])) + cmatricesLen := *(*C.int)(unsafe.Pointer(&matricesLen)) + C.atg_chain_matmul(ptr, cmatricesDataPtr, cmatricesLen) } -func AtgCholesky(ptr *Ctensor, self Ctensor, upper 
int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_cholesky(ptr, self, cupper) +func AtgCholesky(ptr *Ctensor, self Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_cholesky(ptr, self, cupper) } -func AtgCholeskyInverse(ptr *Ctensor, self Ctensor, upper int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_cholesky_inverse(ptr, self, cupper) +func AtgCholeskyInverse(ptr *Ctensor, self Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_cholesky_inverse(ptr, self, cupper) } -func AtgCholeskyInverseOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_cholesky_inverse_out(ptr, out, self, cupper) +func AtgCholeskyInverseOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_cholesky_inverse_out(ptr, out, self, cupper) } -func AtgCholeskyOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_cholesky_out(ptr, out, self, cupper) +func AtgCholeskyOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_cholesky_out(ptr, out, self, cupper) } -func AtgCholeskySolve(ptr *Ctensor, self Ctensor, input2 Ctensor, upper int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_cholesky_solve(ptr, self, input2, cupper) +func AtgCholeskySolve(ptr *Ctensor, self Ctensor, input2 Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_cholesky_solve(ptr, self, input2, cupper) } -func AtgCholeskySolveOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, upper int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_cholesky_solve_out(ptr, out, self, input2, cupper) +func AtgCholeskySolveOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, upper int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_cholesky_solve_out(ptr, out, self, input2, cupper) } -func AtgClamp(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ -C.atg_clamp(ptr, self, min , max ) -} -func AtgClamp_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ -C.atg_clamp_(ptr, self, min , max ) -} -func AtgClampMax(ptr *Ctensor, self Ctensor, max Cscalar){ -C.atg_clamp_max(ptr, self, max ) -} -func AtgClampMax_(ptr *Ctensor, self Ctensor, max Cscalar){ -C.atg_clamp_max_(ptr, self, max ) -} -func AtgClampMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, max Cscalar){ -C.atg_clamp_max_out(ptr, out, self, max ) -} -func AtgClampMin(ptr *Ctensor, self Ctensor, min Cscalar){ -C.atg_clamp_min(ptr, self, min ) -} -func AtgClampMin_(ptr *Ctensor, self Ctensor, min Cscalar){ -C.atg_clamp_min_(ptr, self, min ) -} -func AtgClampMinOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar){ -C.atg_clamp_min_out(ptr, out, self, min ) -} -func AtgClampOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar, max Cscalar){ -C.atg_clamp_out(ptr, out, self, min , max ) -} -func AtgCoalesce(ptr *Ctensor, self Ctensor){ -C.atg_coalesce(ptr, self) -} -func AtgCol2im(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := 
*(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_col2im(ptr, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCol2imBackward(ptr *Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_col2im_backward(ptr, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCol2imBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_col2im_backward_out(ptr, gradInput, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCol2imOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_col2im_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCombinations(ptr *Ctensor, self Ctensor, r int64, withReplacement 
int32){ -cr := *(*C.int64_t)(unsafe.Pointer(&r)) -cwithReplacement := *(*C.int)(unsafe.Pointer(&withReplacement)) -C.atg_combinations(ptr, self, cr, cwithReplacement) -} -func AtgConj(ptr *Ctensor, self Ctensor){ -C.atg_conj(ptr, self) -} -func AtgConjOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_conj_out(ptr, out, self) -} -func AtgConstantPadNd(ptr *Ctensor, self Ctensor, padData []int64, padLen int){ -cpadDataPtr := (*C.int64_t)(unsafe.Pointer(&padData[0])) -cpadLen := *(*C.int)(unsafe.Pointer(&padLen)) -C.atg_constant_pad_nd(ptr, self, cpadDataPtr, cpadLen) -} -func AtgContiguous(ptr *Ctensor, self Ctensor){ -C.atg_contiguous(ptr, self) -} -func AtgConv1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -C.atg_conv1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgConv2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -C.atg_conv2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgConv3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -C.atg_conv3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgConvTbc(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, pad int64){ -cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) -C.atg_conv_tbc(ptr, self, weight, bias, cpad) -} -func AtgConvTbcBackward(ptr *Ctensor, self Ctensor, input Ctensor, weight Ctensor, bias Ctensor, pad int64){ -cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) -C.atg_conv_tbc_backward(ptr, self, input, weight, bias, cpad) -} -func AtgConvTranspose1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, 
dilationData []int64, dilationLen int){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_conv_transpose1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} -func AtgConvTranspose2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_conv_transpose2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} -func AtgConvTranspose3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_conv_transpose3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} -func AtgConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) -coutputPaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -C.atg_convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) -} -func AtgConvolutionOverrideable(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){ -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -C.atg_convolution_overrideable(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) -} -func AtgCopySparseToSparse_(ptr *Ctensor, self Ctensor, src Ctensor, nonBlocking int32){ -cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) -C.atg_copy_sparse_to_sparse_(ptr, self, src, cnonBlocking) -} -func AtgCos(ptr *Ctensor, self Ctensor){ -C.atg_cos(ptr, self) -} -func AtgCos_(ptr *Ctensor, self Ctensor){ -C.atg_cos_(ptr, self) -} -func AtgCosOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_cos_out(ptr, out, self) -} -func AtgCosh(ptr *Ctensor, self Ctensor){ -C.atg_cosh(ptr, self) -} -func AtgCosh_(ptr *Ctensor, self Ctensor){ -C.atg_cosh_(ptr, self) -} -func AtgCoshOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_cosh_out(ptr, out, self) -} -func AtgCosineEmbeddingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){ -cmargin := *(*C.double)(unsafe.Pointer(&margin)) -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_cosine_embedding_loss(ptr, input1, input2, target, cmargin, creduction) -} -func AtgCosineSimilarity(ptr *Ctensor, x1 Ctensor, x2 Ctensor, dim int64, eps float64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -C.atg_cosine_similarity(ptr, x1, x2, cdim, ceps) -} -func AtgCross(ptr *Ctensor, self Ctensor, other Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cross(ptr, self, other, cdim) -} -func AtgCrossOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cross_out(ptr, out, self, other, cdim) -} -func AtgCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, reduction int64, zeroInfinity int32){ -cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) -cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) -ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) -ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) -cblank := 
*(*C.int64_t)(unsafe.Pointer(&blank)) -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) -C.atg_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, creduction, czeroInfinity) -} -func AtgCtcLoss1(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengths Ctensor, targetLengths Ctensor, blank int64, reduction int64, zeroInfinity int32){ -cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) -C.atg_ctc_loss1(ptr, logProbs, targets, inputLengths, targetLengths, cblank, creduction, czeroInfinity) -} -func AtgCudnnAffineGridGenerator(ptr *Ctensor, theta Ctensor, n int64, c int64, h int64, w int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -cc := *(*C.int64_t)(unsafe.Pointer(&c)) -ch := *(*C.int64_t)(unsafe.Pointer(&h)) -cw := *(*C.int64_t)(unsafe.Pointer(&w)) -C.atg_cudnn_affine_grid_generator(ptr, theta, cn, cc, ch, cw) -} -func AtgCudnnAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, n int64, c int64, h int64, w int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -cc := *(*C.int64_t)(unsafe.Pointer(&c)) -ch := *(*C.int64_t)(unsafe.Pointer(&h)) -cw := *(*C.int64_t)(unsafe.Pointer(&w)) -C.atg_cudnn_affine_grid_generator_backward(ptr, grad, cn, cc, ch, cw) -} -func AtgCudnnBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) -cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) -C.atg_cudnn_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) -} -func AtgCudnnBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64, reserveSpace Ctensor){ -cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) -C.atg_cudnn_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon, reserveSpace) -} -func AtgCudnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolution1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) 
-coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution_transpose(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTranspose1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution_transpose1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := 
*(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_cudnn_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor){ -C.atg_cudnn_grid_sampler(ptr, self, grid) -} -func AtgCudnnGridSamplerBackward(ptr *Ctensor, self Ctensor, grid Ctensor, gradOutput Ctensor){ -C.atg_cudnn_grid_sampler_backward(ptr, self, grid, gradOutput) -} -func AtgCummax(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cummax(ptr, self, cdim) -} -func AtgCummaxOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cummax_out(ptr, values, indices, self, cdim) -} -func AtgCummin(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cummin(ptr, self, cdim) -} -func AtgCumminOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_cummin_out(ptr, values, indices, self, cdim) -} -func AtgCumprod(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_cumprod(ptr, self, cdim, cdtype) -} -func AtgCumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_cumprod_out(ptr, out, self, cdim, cdtype) -} -func AtgCumsum(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_cumsum(ptr, self, cdim, cdtype) -} -func AtgCumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_cumsum_out(ptr, out, self, cdim, cdtype) -} -func AtgData(ptr *Ctensor, self Ctensor){ -C.atg_data(ptr, self) -} -func AtgDequantize(ptr *Ctensor, self Ctensor){ -C.atg_dequantize(ptr, self) -} -func AtgDet(ptr *Ctensor, self Ctensor){ -C.atg_det(ptr, self) -} -func AtgDetach(ptr *Ctensor, self Ctensor){ -C.atg_detach(ptr, self) -} -func AtgDetach_(ptr *Ctensor, self Ctensor){ -C.atg_detach_(ptr, self) -} -func AtgDiag(ptr *Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_diag(ptr, self, cdiagonal) -} -func AtgDiagEmbed(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64){ -coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) -cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) -cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) -C.atg_diag_embed(ptr, self, coffset, cdim1, cdim2) -} -func AtgDiagOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_diag_out(ptr, out, self, cdiagonal) -} -func AtgDiagflat(ptr *Ctensor, self Ctensor, offset int64){ -coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) -C.atg_diagflat(ptr, self, coffset) -} -func AtgDiagonal(ptr *Ctensor, self Ctensor, offset int64, dim1 
int64, dim2 int64){ -coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) -cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) -cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) -C.atg_diagonal(ptr, self, coffset, cdim1, cdim2) -} -func AtgDigamma(ptr *Ctensor, self Ctensor){ -C.atg_digamma(ptr, self) -} -func AtgDigamma_(ptr *Ctensor, self Ctensor){ -C.atg_digamma_(ptr, self) -} -func AtgDigammaOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_digamma_out(ptr, out, self) -} -func AtgDist(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_dist(ptr, self, other) -} -func AtgDiv(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_div(ptr, self, other) -} -func AtgDiv1(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_div1(ptr, self, other ) -} -func AtgDiv_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_div_(ptr, self, other) -} -func AtgDiv1_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_div_1(ptr, self, other ) -} -func AtgDivOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_div_out(ptr, out, self, other) -} -func AtgDot(ptr *Ctensor, self Ctensor, tensor Ctensor){ -C.atg_dot(ptr, self, tensor) -} -func AtgDotOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor Ctensor){ -C.atg_dot_out(ptr, out, self, tensor) -} -func AtgDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_dropout(ptr, input, cp, ctrain) -} -func AtgDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_dropout_(ptr, self, cp, ctrain) -} -func AtgEig(ptr *Ctensor, self Ctensor, eigenvectors int32){ -ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) -C.atg_eig(ptr, self, ceigenvectors) -} -func AtgEigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32){ -ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) -C.atg_eig_out(ptr, e, v, self, ceigenvectors) -} -func AtgEinsum(ptr *Ctensor, equation string, tensorsData []Ctensor, tensorsLen int){ -cequation := C.CString(equation) -equationLen := len(equation) -cequationLen := *(*C.int)(unsafe.Pointer(&equationLen)) -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -C.atg_einsum(ptr, cequation, cequationLen, ctensorsDataPtr, ctensorsLen) +func AtgClamp(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar) { + C.atg_clamp(ptr, self, min, max) +} +func AtgClamp_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar) { + C.atg_clamp_(ptr, self, min, max) +} +func AtgClampMax(ptr *Ctensor, self Ctensor, max Cscalar) { + C.atg_clamp_max(ptr, self, max) +} +func AtgClampMax_(ptr *Ctensor, self Ctensor, max Cscalar) { + C.atg_clamp_max_(ptr, self, max) +} +func AtgClampMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, max Cscalar) { + C.atg_clamp_max_out(ptr, out, self, max) +} +func AtgClampMin(ptr *Ctensor, self Ctensor, min Cscalar) { + C.atg_clamp_min(ptr, self, min) +} +func AtgClampMin_(ptr *Ctensor, self Ctensor, min Cscalar) { + C.atg_clamp_min_(ptr, self, min) +} +func AtgClampMinOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar) { + C.atg_clamp_min_out(ptr, out, self, min) +} +func AtgClampOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar, max Cscalar) { + C.atg_clamp_out(ptr, out, self, min, max) +} +func AtgCoalesce(ptr *Ctensor, self Ctensor) { + C.atg_coalesce(ptr, self) +} +func AtgCol2im(ptr *Ctensor, self Ctensor, outputSizeData []int64, 
outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_col2im(ptr, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imBackward(ptr *Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_col2im_backward(ptr, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_col2im_backward_out(ptr, gradInput, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen 
:= *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_col2im_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCombinations(ptr *Ctensor, self Ctensor, r int64, withReplacement int32) { + cr := *(*C.int64_t)(unsafe.Pointer(&r)) + cwithReplacement := *(*C.int)(unsafe.Pointer(&withReplacement)) + C.atg_combinations(ptr, self, cr, cwithReplacement) +} +func AtgConj(ptr *Ctensor, self Ctensor) { + C.atg_conj(ptr, self) +} +func AtgConjOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_conj_out(ptr, out, self) +} +func AtgConstantPadNd(ptr *Ctensor, self Ctensor, padData []int64, padLen int) { + cpadDataPtr := (*C.int64_t)(unsafe.Pointer(&padData[0])) + cpadLen := *(*C.int)(unsafe.Pointer(&padLen)) + C.atg_constant_pad_nd(ptr, self, cpadDataPtr, cpadLen) +} +func AtgContiguous(ptr *Ctensor, self Ctensor) { + C.atg_contiguous(ptr, self) +} +func AtgConv1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_conv1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConv2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_conv2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConv3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_conv3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConvTbc(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, pad int64) { + cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) + 
C.atg_conv_tbc(ptr, self, weight, bias, cpad) +} +func AtgConvTbcBackward(ptr *Ctensor, self Ctensor, input Ctensor, weight Ctensor, bias Ctensor, pad int64) { + cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) + C.atg_conv_tbc_backward(ptr, self, input, weight, bias, cpad) +} +func AtgConvTranspose1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_conv_transpose1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvTranspose2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_conv_transpose2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvTranspose3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_conv_transpose3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, 
outputPaddingData []int64, outputPaddingLen int, groups int64) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) +} +func AtgConvolutionOverrideable(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64) { + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_convolution_overrideable(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) +} +func AtgCopySparseToSparse_(ptr *Ctensor, self Ctensor, src Ctensor, nonBlocking int32) { + cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) + C.atg_copy_sparse_to_sparse_(ptr, self, src, cnonBlocking) +} +func AtgCos(ptr *Ctensor, self Ctensor) { + C.atg_cos(ptr, self) +} +func AtgCos_(ptr *Ctensor, self Ctensor) { + C.atg_cos_(ptr, self) +} +func AtgCosOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_cos_out(ptr, out, self) +} +func AtgCosh(ptr *Ctensor, self Ctensor) { + C.atg_cosh(ptr, self) +} +func AtgCosh_(ptr *Ctensor, self Ctensor) { + C.atg_cosh_(ptr, self) +} +func AtgCoshOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_cosh_out(ptr, out, self) +} +func AtgCosineEmbeddingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64) { + cmargin := *(*C.double)(unsafe.Pointer(&margin)) + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_cosine_embedding_loss(ptr, input1, input2, target, cmargin, creduction) +} +func AtgCosineSimilarity(ptr *Ctensor, x1 Ctensor, x2 Ctensor, dim int64, eps float64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + C.atg_cosine_similarity(ptr, x1, x2, cdim, ceps) +} +func AtgCross(ptr *Ctensor, self Ctensor, other Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_cross(ptr, self, other, cdim) +} +func AtgCrossOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + 
C.atg_cross_out(ptr, out, self, other, cdim) +} +func AtgCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, reduction int64, zeroInfinity int32) { + cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) + cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) + ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) + ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) + cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) + C.atg_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, creduction, czeroInfinity) +} +func AtgCtcLoss1(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengths Ctensor, targetLengths Ctensor, blank int64, reduction int64, zeroInfinity int32) { + cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) + C.atg_ctc_loss1(ptr, logProbs, targets, inputLengths, targetLengths, cblank, creduction, czeroInfinity) +} +func AtgCudnnAffineGridGenerator(ptr *Ctensor, theta Ctensor, n int64, c int64, h int64, w int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + cc := *(*C.int64_t)(unsafe.Pointer(&c)) + ch := *(*C.int64_t)(unsafe.Pointer(&h)) + cw := *(*C.int64_t)(unsafe.Pointer(&w)) + C.atg_cudnn_affine_grid_generator(ptr, theta, cn, cc, ch, cw) +} +func AtgCudnnAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, n int64, c int64, h int64, w int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + cc := *(*C.int64_t)(unsafe.Pointer(&c)) + ch := *(*C.int64_t)(unsafe.Pointer(&h)) + cw := *(*C.int64_t)(unsafe.Pointer(&w)) + C.atg_cudnn_affine_grid_generator_backward(ptr, grad, cn, cc, ch, cw) +} +func AtgCudnnBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) + cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) + C.atg_cudnn_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) +} +func AtgCudnnBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64, reserveSpace Ctensor) { + cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) + C.atg_cudnn_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon, reserveSpace) +} +func AtgCudnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := 
*(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolution1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) + cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) + cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, 
cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution_transpose(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTranspose1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution_transpose1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, 
cdeterministic) +} +func AtgCudnnConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) + cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_cudnn_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor) { + C.atg_cudnn_grid_sampler(ptr, self, grid) +} +func AtgCudnnGridSamplerBackward(ptr *Ctensor, self Ctensor, grid Ctensor, gradOutput Ctensor) { + C.atg_cudnn_grid_sampler_backward(ptr, self, grid, gradOutput) +} +func AtgCummax(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_cummax(ptr, self, cdim) +} +func AtgCummaxOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_cummax_out(ptr, values, indices, self, cdim) +} +func AtgCummin(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_cummin(ptr, self, cdim) +} +func AtgCumminOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_cummin_out(ptr, values, indices, self, cdim) +} +func AtgCumprod(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_cumprod(ptr, self, cdim, cdtype) +} +func AtgCumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_cumprod_out(ptr, out, self, cdim, cdtype) +} +func AtgCumsum(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_cumsum(ptr, self, cdim, cdtype) +} +func AtgCumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_cumsum_out(ptr, out, self, cdim, cdtype) +} +func AtgData(ptr *Ctensor, self Ctensor) { + C.atg_data(ptr, self) +} +func AtgDequantize(ptr *Ctensor, self Ctensor) { + C.atg_dequantize(ptr, self) +} +func AtgDet(ptr *Ctensor, self Ctensor) { + C.atg_det(ptr, self) +} +func AtgDetach(ptr *Ctensor, self Ctensor) { + C.atg_detach(ptr, self) +} +func AtgDetach_(ptr *Ctensor, self Ctensor) { + C.atg_detach_(ptr, self) +} +func AtgDiag(ptr *Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + 
C.atg_diag(ptr, self, cdiagonal) +} +func AtgDiagEmbed(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64) { + coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) + cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) + cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) + C.atg_diag_embed(ptr, self, coffset, cdim1, cdim2) +} +func AtgDiagOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + C.atg_diag_out(ptr, out, self, cdiagonal) +} +func AtgDiagflat(ptr *Ctensor, self Ctensor, offset int64) { + coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) + C.atg_diagflat(ptr, self, coffset) +} +func AtgDiagonal(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64) { + coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) + cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) + cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) + C.atg_diagonal(ptr, self, coffset, cdim1, cdim2) +} +func AtgDigamma(ptr *Ctensor, self Ctensor) { + C.atg_digamma(ptr, self) +} +func AtgDigamma_(ptr *Ctensor, self Ctensor) { + C.atg_digamma_(ptr, self) +} +func AtgDigammaOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_digamma_out(ptr, out, self) +} +func AtgDist(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_dist(ptr, self, other) +} +func AtgDiv(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_div(ptr, self, other) +} +func AtgDiv1(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_div1(ptr, self, other) +} +func AtgDiv_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_div_(ptr, self, other) +} +func AtgDiv1_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_div_1(ptr, self, other) +} +func AtgDivOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_div_out(ptr, out, self, other) +} +func AtgDot(ptr *Ctensor, self Ctensor, tensor Ctensor) { + C.atg_dot(ptr, self, tensor) +} +func AtgDotOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor Ctensor) { + C.atg_dot_out(ptr, out, self, tensor) +} +func AtgDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_dropout(ptr, input, cp, ctrain) +} +func AtgDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_dropout_(ptr, self, cp, ctrain) +} +func AtgEig(ptr *Ctensor, self Ctensor, eigenvectors int32) { + ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) + C.atg_eig(ptr, self, ceigenvectors) +} +func AtgEigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32) { + ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) + C.atg_eig_out(ptr, e, v, self, ceigenvectors) +} +func AtgEinsum(ptr *Ctensor, equation string, tensorsData []Ctensor, tensorsLen int) { + cequation := C.CString(equation) + equationLen := len(equation) + cequationLen := *(*C.int)(unsafe.Pointer(&equationLen)) + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) + C.atg_einsum(ptr, cequation, cequationLen, ctensorsDataPtr, ctensorsLen) } -func AtgElu(ptr *Ctensor, self Ctensor){ -C.atg_elu(ptr, self) +func AtgElu(ptr *Ctensor, self Ctensor) { + C.atg_elu(ptr, self) } -func AtgElu_(ptr *Ctensor, self Ctensor){ -C.atg_elu_(ptr, self) +func AtgElu_(ptr *Ctensor, self Ctensor) { + C.atg_elu_(ptr, self) } -func AtgEluBackward(ptr *Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output 
Ctensor){ -C.atg_elu_backward(ptr, gradOutput, alpha , scale , inputScale , output) +func AtgEluBackward(ptr *Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor) { + C.atg_elu_backward(ptr, gradOutput, alpha, scale, inputScale, output) } -func AtgEluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor){ -C.atg_elu_backward_out(ptr, gradInput, gradOutput, alpha , scale , inputScale , output) +func AtgEluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor) { + C.atg_elu_backward_out(ptr, gradInput, gradOutput, alpha, scale, inputScale, output) } -func AtgEluOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_elu_out(ptr, out, self) +func AtgEluOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_elu_out(ptr, out, self) } -func AtgEmbedding(ptr *Ctensor, weight Ctensor, indices Ctensor, paddingIdx int64, scaleGradByFreq int32, sparse int32){ -cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -csparse := *(*C.int)(unsafe.Pointer(&sparse)) -C.atg_embedding(ptr, weight, indices, cpaddingIdx, cscaleGradByFreq, csparse) +func AtgEmbedding(ptr *Ctensor, weight Ctensor, indices Ctensor, paddingIdx int64, scaleGradByFreq int32, sparse int32) { + cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + csparse := *(*C.int)(unsafe.Pointer(&sparse)) + C.atg_embedding(ptr, weight, indices, cpaddingIdx, cscaleGradByFreq, csparse) } -func AtgEmbeddingBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32, sparse int32){ -cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) -cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -csparse := *(*C.int)(unsafe.Pointer(&sparse)) -C.atg_embedding_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq, csparse) +func AtgEmbeddingBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32, sparse int32) { + cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) + cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + csparse := *(*C.int)(unsafe.Pointer(&sparse)) + C.atg_embedding_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq, csparse) } -func AtgEmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){ -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -csparse := *(*C.int)(unsafe.Pointer(&sparse)) -cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) -C.atg_embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) +func AtgEmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32) { + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + csparse := *(*C.int)(unsafe.Pointer(&sparse)) + cincludeLastOffset := 
*(*C.int)(unsafe.Pointer(&includeLastOffset)) + C.atg_embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) } -func AtgEmbeddingDenseBackward(ptr *Ctensor, gradOutput Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){ -cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) -cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -C.atg_embedding_dense_backward(ptr, gradOutput, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) +func AtgEmbeddingDenseBackward(ptr *Ctensor, gradOutput Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32) { + cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) + cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + C.atg_embedding_dense_backward(ptr, gradOutput, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) } -func AtgEmbeddingRenorm_(ptr *Ctensor, self Ctensor, indices Ctensor, maxNorm float64, normType float64){ -cmaxNorm := *(*C.double)(unsafe.Pointer(&maxNorm)) -cnormType := *(*C.double)(unsafe.Pointer(&normType)) -C.atg_embedding_renorm_(ptr, self, indices, cmaxNorm, cnormType) +func AtgEmbeddingRenorm_(ptr *Ctensor, self Ctensor, indices Ctensor, maxNorm float64, normType float64) { + cmaxNorm := *(*C.double)(unsafe.Pointer(&maxNorm)) + cnormType := *(*C.double)(unsafe.Pointer(&normType)) + C.atg_embedding_renorm_(ptr, self, indices, cmaxNorm, cnormType) } -func AtgEmbeddingSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){ -cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) -cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) -cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) -C.atg_embedding_sparse_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) +func AtgEmbeddingSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32) { + cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) + cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) + cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) + C.atg_embedding_sparse_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) } -func AtgEmpty(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_empty(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +func AtgEmpty(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_empty(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) } -func AtgEmptyLike(ptr *Ctensor, self Ctensor){ -C.atg_empty_like(ptr, self) +func AtgEmptyLike(ptr *Ctensor, self Ctensor) { + C.atg_empty_like(ptr, self) } -func AtgEmptyOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) 
-csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_empty_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgEmptyStrided(ptr *Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_empty_strided(ptr, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, coptionsKind, coptionsDevice) -} -func AtgEq(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_eq(ptr, self, other ) -} -func AtgEq1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_eq1(ptr, self, other) -} -func AtgEq_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_eq_(ptr, self, other ) -} -func AtgEq1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_eq_1(ptr, self, other) -} -func AtgEqOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_eq_out(ptr, out, self, other ) -} -func AtgEqOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_eq_out1(ptr, out, self, other) -} -func AtgErf(ptr *Ctensor, self Ctensor){ -C.atg_erf(ptr, self) -} -func AtgErf_(ptr *Ctensor, self Ctensor){ -C.atg_erf_(ptr, self) -} -func AtgErfOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_erf_out(ptr, out, self) -} -func AtgErfc(ptr *Ctensor, self Ctensor){ -C.atg_erfc(ptr, self) -} -func AtgErfc_(ptr *Ctensor, self Ctensor){ -C.atg_erfc_(ptr, self) -} -func AtgErfcOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_erfc_out(ptr, out, self) -} -func AtgErfinv(ptr *Ctensor, self Ctensor){ -C.atg_erfinv(ptr, self) -} -func AtgErfinv_(ptr *Ctensor, self Ctensor){ -C.atg_erfinv_(ptr, self) -} -func AtgErfinvOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_erfinv_out(ptr, out, self) -} -func AtgExp(ptr *Ctensor, self Ctensor){ -C.atg_exp(ptr, self) -} -func AtgExp_(ptr *Ctensor, self Ctensor){ -C.atg_exp_(ptr, self) -} -func AtgExpOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_exp_out(ptr, out, self) -} -func AtgExpand(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, implicit int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -cimplicit := *(*C.int)(unsafe.Pointer(&implicit)) -C.atg_expand(ptr, self, csizeDataPtr, csizeLen, cimplicit) -} -func AtgExpandAs(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_expand_as(ptr, self, other) -} -func AtgExpm1(ptr *Ctensor, self Ctensor){ -C.atg_expm1(ptr, self) -} -func AtgExpm1_(ptr *Ctensor, self Ctensor){ -C.atg_expm1_(ptr, self) -} -func AtgExpm1Out(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_expm1_out(ptr, out, self) -} -func AtgExponential_(ptr *Ctensor, self Ctensor, lambd float64){ -clambd := *(*C.double)(unsafe.Pointer(&lambd)) -C.atg_exponential_(ptr, self, clambd) +func AtgEmptyOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_empty_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgEmptyStrided(ptr *Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_empty_strided(ptr, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, coptionsKind, coptionsDevice) +} +func AtgEq(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_eq(ptr, self, other) +} +func AtgEq1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_eq1(ptr, self, other) +} +func AtgEq_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_eq_(ptr, self, other) +} +func AtgEq1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_eq_1(ptr, self, other) +} +func AtgEqOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_eq_out(ptr, out, self, other) +} +func AtgEqOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_eq_out1(ptr, out, self, other) +} +func AtgErf(ptr *Ctensor, self Ctensor) { + C.atg_erf(ptr, self) +} +func AtgErf_(ptr *Ctensor, self Ctensor) { + C.atg_erf_(ptr, self) +} +func AtgErfOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_erf_out(ptr, out, self) +} +func AtgErfc(ptr *Ctensor, self Ctensor) { + C.atg_erfc(ptr, self) +} +func AtgErfc_(ptr *Ctensor, self Ctensor) { + C.atg_erfc_(ptr, self) +} +func AtgErfcOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_erfc_out(ptr, out, self) +} +func AtgErfinv(ptr *Ctensor, self Ctensor) { + C.atg_erfinv(ptr, self) +} +func AtgErfinv_(ptr *Ctensor, self Ctensor) { + C.atg_erfinv_(ptr, self) +} +func AtgErfinvOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_erfinv_out(ptr, out, self) +} +func AtgExp(ptr *Ctensor, self Ctensor) { + C.atg_exp(ptr, self) +} +func AtgExp_(ptr *Ctensor, self Ctensor) { + C.atg_exp_(ptr, self) +} +func AtgExpOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_exp_out(ptr, out, self) +} +func AtgExpand(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, implicit int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + cimplicit := *(*C.int)(unsafe.Pointer(&implicit)) + C.atg_expand(ptr, self, csizeDataPtr, csizeLen, cimplicit) +} +func AtgExpandAs(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_expand_as(ptr, self, other) +} +func AtgExpm1(ptr *Ctensor, self Ctensor) { + C.atg_expm1(ptr, self) +} +func AtgExpm1_(ptr *Ctensor, self Ctensor) { + C.atg_expm1_(ptr, self) +} +func AtgExpm1Out(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_expm1_out(ptr, out, self) +} +func AtgExponential_(ptr *Ctensor, self Ctensor, lambd float64) { + clambd := *(*C.double)(unsafe.Pointer(&lambd)) + C.atg_exponential_(ptr, self, clambd) } -func AtgEye(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_eye(ptr, cn, coptionsKind, coptionsDevice) +func AtgEye(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_eye(ptr, cn, coptionsKind, coptionsDevice) } -func AtgEye1(ptr *Ctensor, n int64, m int64, optionsKind int32, optionsDevice int32){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -cm := *(*C.int64_t)(unsafe.Pointer(&m)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := 
*(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_eye1(ptr, cn, cm, coptionsKind, coptionsDevice) +func AtgEye1(ptr *Ctensor, n int64, m int64, optionsKind int32, optionsDevice int32) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + cm := *(*C.int64_t)(unsafe.Pointer(&m)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_eye1(ptr, cn, cm, coptionsKind, coptionsDevice) } -func AtgEyeOut(ptr *Ctensor, out Ctensor, n int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -C.atg_eye_out(ptr, out, cn) +func AtgEyeOut(ptr *Ctensor, out Ctensor, n int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + C.atg_eye_out(ptr, out, cn) } -func AtgEyeOut1(ptr *Ctensor, out Ctensor, n int64, m int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -cm := *(*C.int64_t)(unsafe.Pointer(&m)) -C.atg_eye_out1(ptr, out, cn, cm) +func AtgEyeOut1(ptr *Ctensor, out Ctensor, n int64, m int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + cm := *(*C.int64_t)(unsafe.Pointer(&m)) + C.atg_eye_out1(ptr, out, cn, cm) } -func AtgFakeQuantizePerChannelAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ -caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) -cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) -cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) -C.atg_fake_quantize_per_channel_affine(ptr, self, scale, zeroPoint, caxis, cquantMin, cquantMax) +func AtgFakeQuantizePerChannelAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64) { + caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) + cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) + cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) + C.atg_fake_quantize_per_channel_affine(ptr, self, scale, zeroPoint, caxis, cquantMin, cquantMax) } -func AtgFakeQuantizePerChannelAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ -caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) -cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) -cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) -C.atg_fake_quantize_per_channel_affine_backward(ptr, grad, self, scale, zeroPoint, caxis, cquantMin, cquantMax) +func AtgFakeQuantizePerChannelAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64) { + caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) + cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) + cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) + C.atg_fake_quantize_per_channel_affine_backward(ptr, grad, self, scale, zeroPoint, caxis, cquantMin, cquantMax) } -func AtgFakeQuantizePerTensorAffine(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){ -cscale := *(*C.double)(unsafe.Pointer(&scale)) -czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) -cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) -cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) -C.atg_fake_quantize_per_tensor_affine(ptr, self, cscale, czeroPoint, cquantMin, cquantMax) +func AtgFakeQuantizePerTensorAffine(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64) { + cscale := *(*C.double)(unsafe.Pointer(&scale)) + czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) + cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) + cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) + 
C.atg_fake_quantize_per_tensor_affine(ptr, self, cscale, czeroPoint, cquantMin, cquantMax) } -func AtgFakeQuantizePerTensorAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){ -cscale := *(*C.double)(unsafe.Pointer(&scale)) -czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) -cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) -cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) -C.atg_fake_quantize_per_tensor_affine_backward(ptr, grad, self, cscale, czeroPoint, cquantMin, cquantMax) +func AtgFakeQuantizePerTensorAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64) { + cscale := *(*C.double)(unsafe.Pointer(&scale)) + czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) + cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) + cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) + C.atg_fake_quantize_per_tensor_affine_backward(ptr, grad, self, cscale, czeroPoint, cquantMin, cquantMax) } -func AtgFbgemmLinearFp16Weight(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){ -C.atg_fbgemm_linear_fp16_weight(ptr, input, packedWeight, bias) +func AtgFbgemmLinearFp16Weight(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor) { + C.atg_fbgemm_linear_fp16_weight(ptr, input, packedWeight, bias) } -func AtgFbgemmLinearFp16WeightFp32Activation(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){ -C.atg_fbgemm_linear_fp16_weight_fp32_activation(ptr, input, packedWeight, bias) +func AtgFbgemmLinearFp16WeightFp32Activation(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor) { + C.atg_fbgemm_linear_fp16_weight_fp32_activation(ptr, input, packedWeight, bias) } -func AtgFbgemmLinearInt8Weight(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){ -C.atg_fbgemm_linear_int8_weight(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias) +func AtgFbgemmLinearInt8Weight(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor) { + C.atg_fbgemm_linear_int8_weight(ptr, input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) } -func AtgFbgemmLinearInt8WeightFp32Activation(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){ -C.atg_fbgemm_linear_int8_weight_fp32_activation(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias) +func AtgFbgemmLinearInt8WeightFp32Activation(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor) { + C.atg_fbgemm_linear_int8_weight_fp32_activation(ptr, input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) } -func AtgFbgemmPackGemmMatrixFp16(ptr *Ctensor, input Ctensor){ -C.atg_fbgemm_pack_gemm_matrix_fp16(ptr, input) +func AtgFbgemmPackGemmMatrixFp16(ptr *Ctensor, input Ctensor) { + C.atg_fbgemm_pack_gemm_matrix_fp16(ptr, input) } -func AtgFbgemmPackQuantizedMatrix(ptr *Ctensor, input Ctensor){ -C.atg_fbgemm_pack_quantized_matrix(ptr, input) +func AtgFbgemmPackQuantizedMatrix(ptr *Ctensor, input Ctensor) { + C.atg_fbgemm_pack_quantized_matrix(ptr, input) } -func AtgFbgemmPackQuantizedMatrix1(ptr *Ctensor, input Ctensor, k int64, n int64){ -ck := 
*(*C.int64_t)(unsafe.Pointer(&k)) -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -C.atg_fbgemm_pack_quantized_matrix1(ptr, input, ck, cn) +func AtgFbgemmPackQuantizedMatrix1(ptr *Ctensor, input Ctensor, k int64, n int64) { + ck := *(*C.int64_t)(unsafe.Pointer(&k)) + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + C.atg_fbgemm_pack_quantized_matrix1(ptr, input, ck, cn) } -func AtgFeatureAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_feature_alpha_dropout(ptr, input, cp, ctrain) +func AtgFeatureAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_feature_alpha_dropout(ptr, input, cp, ctrain) } -func AtgFeatureAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_feature_alpha_dropout_(ptr, self, cp, ctrain) +func AtgFeatureAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_feature_alpha_dropout_(ptr, self, cp, ctrain) } -func AtgFeatureDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_feature_dropout(ptr, input, cp, ctrain) -} -func AtgFeatureDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -C.atg_feature_dropout_(ptr, self, cp, ctrain) -} -func AtgFft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){ -csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) -cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) -C.atg_fft(ptr, self, csignalNdim, cnormalized) -} -func AtgFill_(ptr *Ctensor, self Ctensor, value Cscalar){ -C.atg_fill_(ptr, self, value ) -} -func AtgFill1_(ptr *Ctensor, self Ctensor, value Ctensor){ -C.atg_fill_1(ptr, self, value) -} -func AtgFillDiagonal_(ptr *Ctensor, self Ctensor, fillValue Cscalar, wrap int32){ -cwrap := *(*C.int)(unsafe.Pointer(&wrap)) -C.atg_fill_diagonal_(ptr, self, fillValue , cwrap) -} -func AtgFlatten(ptr *Ctensor, self Ctensor, startDim int64, endDim int64){ -cstartDim := *(*C.int64_t)(unsafe.Pointer(&startDim)) -cendDim := *(*C.int64_t)(unsafe.Pointer(&endDim)) -C.atg_flatten(ptr, self, cstartDim, cendDim) -} -func AtgFlip(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){ -cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) -cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) -C.atg_flip(ptr, self, cdimsDataPtr, cdimsLen) -} -func AtgFloor(ptr *Ctensor, self Ctensor){ -C.atg_floor(ptr, self) -} -func AtgFloor_(ptr *Ctensor, self Ctensor){ -C.atg_floor_(ptr, self) -} -func AtgFloorDivide(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_floor_divide(ptr, self, other) -} -func AtgFloorDivide1(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_floor_divide1(ptr, self, other ) -} -func AtgFloorDivide_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_floor_divide_(ptr, self, other) -} -func AtgFloorDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_floor_divide_1(ptr, self, other ) -} -func AtgFloorDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_floor_divide_out(ptr, out, self, other) -} -func AtgFloorOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_floor_out(ptr, 
out, self) -} -func AtgFmod(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_fmod(ptr, self, other ) -} -func AtgFmod1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_fmod1(ptr, self, other) -} -func AtgFmod_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_fmod_(ptr, self, other ) -} -func AtgFmod1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_fmod_1(ptr, self, other) -} -func AtgFmodOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_fmod_out(ptr, out, self, other ) -} -func AtgFmodOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_fmod_out1(ptr, out, self, other) -} -func AtgFrac(ptr *Ctensor, self Ctensor){ -C.atg_frac(ptr, self) -} -func AtgFrac_(ptr *Ctensor, self Ctensor){ -C.atg_frac_(ptr, self) -} -func AtgFracOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_frac_out(ptr, out, self) -} -func AtgFractionalMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFractionalMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool2dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool2d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFractionalMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := 
*(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFractionalMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool3dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_fractional_max_pool3d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFrobeniusNorm(ptr *Ctensor, self Ctensor){ -C.atg_frobenius_norm(ptr, self) -} -func AtgFrobeniusNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_frobenius_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgFrobeniusNormOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_frobenius_norm_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgFromFile(ptr *Ctensor, filename string, shared int32, size int64, optionsKind int32, optionsDevice int32){ -cfilename := C.CString(filename) -filenameLen := len(filename) -cfilenameLen := *(*C.int)(unsafe.Pointer(&filenameLen)) -cshared := *(*C.int)(unsafe.Pointer(&shared)) -csize := *(*C.int64_t)(unsafe.Pointer(&size)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_from_file(ptr, cfilename, cfilenameLen, cshared, csize, coptionsKind, coptionsDevice) -} -func AtgFull(ptr *Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, 
optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_full(ptr, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice) -} -func AtgFullLike(ptr *Ctensor, self Ctensor, fillValue Cscalar){ -C.atg_full_like(ptr, self, fillValue ) -} -func AtgFullOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_full_out(ptr, out, csizeDataPtr, csizeLen, fillValue ) -} -func AtgGather(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) -C.atg_gather(ptr, self, cdim, index, csparseGrad) -} -func AtgGatherOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) -C.atg_gather_out(ptr, out, self, cdim, index, csparseGrad) -} -func AtgGe(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_ge(ptr, self, other ) -} -func AtgGe1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_ge1(ptr, self, other) -} -func AtgGe_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_ge_(ptr, self, other ) -} -func AtgGe1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_ge_1(ptr, self, other) -} -func AtgGeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_ge_out(ptr, out, self, other ) -} -func AtgGeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_ge_out1(ptr, out, self, other) -} -func AtgGelu(ptr *Ctensor, self Ctensor){ -C.atg_gelu(ptr, self) -} -func AtgGeluBackward(ptr *Ctensor, grad Ctensor, self Ctensor){ -C.atg_gelu_backward(ptr, grad, self) -} -func AtgGeometric_(ptr *Ctensor, self Ctensor, p float64){ -cp := *(*C.double)(unsafe.Pointer(&p)) -C.atg_geometric_(ptr, self, cp) -} -func AtgGeqrf(ptr *Ctensor, self Ctensor){ -C.atg_geqrf(ptr, self) -} -func AtgGeqrfOut(ptr *Ctensor, a Ctensor, tau Ctensor, self Ctensor){ -C.atg_geqrf_out(ptr, a, tau, self) -} -func AtgGer(ptr *Ctensor, self Ctensor, vec2 Ctensor){ -C.atg_ger(ptr, self, vec2) -} -func AtgGerOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor){ -C.atg_ger_out(ptr, out, self, vec2) -} -func AtgGlu(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_glu(ptr, self, cdim) -} -func AtgGluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_glu_backward(ptr, gradOutput, self, cdim) -} -func AtgGluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_glu_backward_out(ptr, gradInput, gradOutput, self, cdim) -} -func AtgGluOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_glu_out(ptr, out, self, cdim) -} -func AtgGrad(ptr *Ctensor, self Ctensor){ -C.atg_grad(ptr, self) -} -func AtgGridSampler(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ -cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) -cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) -calignCorners := 
*(*C.int)(unsafe.Pointer(&alignCorners))
-C.atg_grid_sampler(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
-}
-func AtgGridSampler2d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){
-cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))
-cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
-calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
-C.atg_grid_sampler_2d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
-}
-func AtgGridSampler2dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){
-cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))
-cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
-calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
-C.atg_grid_sampler_2d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
-}
-func AtgGridSampler3d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){
-cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))
-cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
-calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
-C.atg_grid_sampler_3d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
-}
-func AtgGridSampler3dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){
-cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))
-cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
-calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
-C.atg_grid_sampler_3d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
-}
-func AtgGroupNorm(ptr *Ctensor, input Ctensor, numGroups int64, weight Ctensor, bias Ctensor, eps float64, cudnnEnabled int32){
-cnumGroups := *(*C.int64_t)(unsafe.Pointer(&numGroups))
-ceps := *(*C.double)(unsafe.Pointer(&eps))
-ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))
-C.atg_group_norm(ptr, input, cnumGroups, weight, bias, ceps, ccudnnEnabled)
-}
-func AtgGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
-cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
-cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
-chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
-cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
-cdropout := *(*C.double)(unsafe.Pointer(&dropout))
-ctrain := *(*C.int)(unsafe.Pointer(&train))
-cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
-cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
-C.atg_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
-}
-func AtgGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
-cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
-cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
-chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
-cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
-cdropout := *(*C.double)(unsafe.Pointer(&dropout))
-ctrain := *(*C.int)(unsafe.Pointer(&train))
-cbidirectional := 
*(*C.int)(unsafe.Pointer(&bidirectional)) -C.atg_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){ -C.atg_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh) -} -func AtgGt(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_gt(ptr, self, other ) -} -func AtgGt1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_gt1(ptr, self, other) -} -func AtgGt_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_gt_(ptr, self, other ) -} -func AtgGt1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_gt_1(ptr, self, other) -} -func AtgGtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_gt_out(ptr, out, self, other ) -} -func AtgGtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_gt_out1(ptr, out, self, other) -} -func AtgHammingWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_hamming_window(ptr, cwindowLength, coptionsKind, coptionsDevice) -} -func AtgHammingWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_hamming_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) -} -func AtgHammingWindow2(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) -calpha := *(*C.double)(unsafe.Pointer(&alpha)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_hamming_window2(ptr, cwindowLength, cperiodic, calpha, coptionsKind, coptionsDevice) -} -func AtgHammingWindow3(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, beta float64, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) -calpha := *(*C.double)(unsafe.Pointer(&alpha)) -cbeta := *(*C.double)(unsafe.Pointer(&beta)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_hamming_window3(ptr, cwindowLength, cperiodic, calpha, cbeta, coptionsKind, coptionsDevice) -} -func AtgHannWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_hann_window(ptr, cwindowLength, coptionsKind, coptionsDevice) -} -func AtgHannWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ -cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) -cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_hann_window1(ptr, cwindowLength, cperiodic, coptionsKind, 
coptionsDevice) -} -func AtgHardshrink(ptr *Ctensor, self Ctensor){ -C.atg_hardshrink(ptr, self) -} -func AtgHardshrinkBackward(ptr *Ctensor, gradOut Ctensor, self Ctensor, lambd Cscalar){ -C.atg_hardshrink_backward(ptr, gradOut, self, lambd ) -} -func AtgHardsigmoid(ptr *Ctensor, self Ctensor){ -C.atg_hardsigmoid(ptr, self) -} -func AtgHardsigmoid_(ptr *Ctensor, self Ctensor){ -C.atg_hardsigmoid_(ptr, self) -} -func AtgHardsigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ -C.atg_hardsigmoid_backward(ptr, gradOutput, self) -} -func AtgHardsigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_hardsigmoid_out(ptr, out, self) -} -func AtgHardtanh(ptr *Ctensor, self Ctensor){ -C.atg_hardtanh(ptr, self) -} -func AtgHardtanh_(ptr *Ctensor, self Ctensor){ -C.atg_hardtanh_(ptr, self) -} -func AtgHardtanhBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){ -C.atg_hardtanh_backward(ptr, gradOutput, self, minVal , maxVal ) -} -func AtgHardtanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){ -C.atg_hardtanh_backward_out(ptr, gradInput, gradOutput, self, minVal , maxVal ) -} -func AtgHardtanhOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_hardtanh_out(ptr, out, self) -} -func AtgHingeEmbeddingLoss(ptr *Ctensor, self Ctensor, target Ctensor, margin float64, reduction int64){ -cmargin := *(*C.double)(unsafe.Pointer(&margin)) -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_hinge_embedding_loss(ptr, self, target, cmargin, creduction) -} -func AtgHistc(ptr *Ctensor, self Ctensor, bins int64){ -cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) -C.atg_histc(ptr, self, cbins) -} -func AtgHistcOut(ptr *Ctensor, out Ctensor, self Ctensor, bins int64){ -cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) -C.atg_histc_out(ptr, out, self, cbins) -} -func AtgHspmm(ptr *Ctensor, mat1 Ctensor, mat2 Ctensor){ -C.atg_hspmm(ptr, mat1, mat2) -} -func AtgHspmmOut(ptr *Ctensor, out Ctensor, mat1 Ctensor, mat2 Ctensor){ -C.atg_hspmm_out(ptr, out, mat1, mat2) -} -func AtgIfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){ -csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) -cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) -C.atg_ifft(ptr, self, csignalNdim, cnormalized) -} -func AtgIm2col(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_im2col(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgIm2colBackward(ptr *Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := 
*(*C.int)(unsafe.Pointer(&inputSizeLen)) -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_im2col_backward(ptr, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgIm2colBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_im2col_backward_out(ptr, gradInput, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgIm2colOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -C.atg_im2col_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgImag(ptr *Ctensor, self Ctensor){ -C.atg_imag(ptr, self) -} -func AtgIndex(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int){ -cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) -cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) -C.atg_index(ptr, self, cindicesDataPtr, cindicesLen) -} -func AtgIndexAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_add(ptr, self, cdim, index, source) -} -func AtgIndexAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_add_(ptr, self, cdim, index, source) -} -func AtgIndexCopy(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ -cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_copy(ptr, self, cdim, index, source) -} -func AtgIndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_copy_(ptr, self, cdim, index, source) -} -func AtgIndexFill(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_fill(ptr, self, cdim, index, value ) -} -func AtgIndexFill1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_fill1(ptr, self, cdim, index, value) -} -func AtgIndexFill_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_fill_(ptr, self, cdim, index, value ) -} -func AtgIndexFill1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_fill_1(ptr, self, cdim, index, value) -} -func AtgIndexPut(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){ -cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) -cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) -caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) -C.atg_index_put(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) -} -func AtgIndexPut_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){ -cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) -cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) -caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) -C.atg_index_put_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) -} -func AtgIndexSelect(ptr *Ctensor, self Ctensor, dim int64, index Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_select(ptr, self, cdim, index) -} -func AtgIndexSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_index_select_out(ptr, out, self, cdim, index) -} -func AtgIndices(ptr *Ctensor, self Ctensor){ -C.atg_indices(ptr, self) -} -func AtgInstanceNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, useInputStats int32, momentum float64, eps float64, cudnnEnabled int32){ -cuseInputStats := *(*C.int)(unsafe.Pointer(&useInputStats)) -cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) -C.atg_instance_norm(ptr, input, weight, bias, runningMean, runningVar, cuseInputStats, cmomentum, ceps, ccudnnEnabled) -} -func AtgIntRepr(ptr *Ctensor, self Ctensor){ -C.atg_int_repr(ptr, self) -} -func AtgInverse(ptr *Ctensor, self Ctensor){ -C.atg_inverse(ptr, self) -} -func AtgInverseOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_inverse_out(ptr, out, self) -} -func AtgIrfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32, signalSizesData []int64, signalSizesLen int){ -csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) -cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) -conesided := *(*C.int)(unsafe.Pointer(&onesided)) -csignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&signalSizesData[0])) -csignalSizesLen := *(*C.int)(unsafe.Pointer(&signalSizesLen)) -C.atg_irfft(ptr, self, csignalNdim, cnormalized, conesided, csignalSizesDataPtr, csignalSizesLen) -} 
-func AtgIsclose(ptr *Ctensor, self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32){ -crtol := *(*C.double)(unsafe.Pointer(&rtol)) -catol := *(*C.double)(unsafe.Pointer(&atol)) -cequalNan := *(*C.int)(unsafe.Pointer(&equalNan)) -C.atg_isclose(ptr, self, other, crtol, catol, cequalNan) -} -func AtgIsfinite(ptr *Ctensor, self Ctensor){ -C.atg_isfinite(ptr, self) +func AtgFeatureDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_feature_dropout(ptr, input, cp, ctrain) +} +func AtgFeatureDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + C.atg_feature_dropout_(ptr, self, cp, ctrain) +} +func AtgFft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32) { + csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) + cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) + C.atg_fft(ptr, self, csignalNdim, cnormalized) +} +func AtgFill_(ptr *Ctensor, self Ctensor, value Cscalar) { + C.atg_fill_(ptr, self, value) +} +func AtgFill1_(ptr *Ctensor, self Ctensor, value Ctensor) { + C.atg_fill_1(ptr, self, value) +} +func AtgFillDiagonal_(ptr *Ctensor, self Ctensor, fillValue Cscalar, wrap int32) { + cwrap := *(*C.int)(unsafe.Pointer(&wrap)) + C.atg_fill_diagonal_(ptr, self, fillValue, cwrap) +} +func AtgFlatten(ptr *Ctensor, self Ctensor, startDim int64, endDim int64) { + cstartDim := *(*C.int64_t)(unsafe.Pointer(&startDim)) + cendDim := *(*C.int64_t)(unsafe.Pointer(&endDim)) + C.atg_flatten(ptr, self, cstartDim, cendDim) +} +func AtgFlip(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int) { + cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) + cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) + C.atg_flip(ptr, self, cdimsDataPtr, cdimsLen) +} +func AtgFloor(ptr *Ctensor, self Ctensor) { + C.atg_floor(ptr, self) +} +func AtgFloor_(ptr *Ctensor, self Ctensor) { + C.atg_floor_(ptr, self) +} +func AtgFloorDivide(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_floor_divide(ptr, self, other) +} +func AtgFloorDivide1(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_floor_divide1(ptr, self, other) +} +func AtgFloorDivide_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_floor_divide_(ptr, self, other) +} +func AtgFloorDivide1_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_floor_divide_1(ptr, self, other) +} +func AtgFloorDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_floor_divide_out(ptr, out, self, other) +} +func AtgFloorOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_floor_out(ptr, out, self) +} +func AtgFmod(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_fmod(ptr, self, other) +} +func AtgFmod1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_fmod1(ptr, self, other) +} +func AtgFmod_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_fmod_(ptr, self, other) +} +func AtgFmod1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_fmod_1(ptr, self, other) +} +func AtgFmodOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_fmod_out(ptr, out, self, other) +} +func AtgFmodOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_fmod_out1(ptr, out, self, other) +} +func AtgFrac(ptr *Ctensor, self Ctensor) { + C.atg_frac(ptr, self) +} +func AtgFrac_(ptr *Ctensor, self Ctensor) { + C.atg_frac_(ptr, self) +} +func AtgFracOut(ptr *Ctensor, out Ctensor, self Ctensor) { + 
C.atg_frac_out(ptr, out, self) +} +func AtgFractionalMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool2dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool2d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool3d_backward(ptr, 
gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool3dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_fractional_max_pool3d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFrobeniusNorm(ptr *Ctensor, self Ctensor) { + C.atg_frobenius_norm(ptr, self) +} +func AtgFrobeniusNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_frobenius_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgFrobeniusNormOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_frobenius_norm_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgFromFile(ptr *Ctensor, filename string, shared int32, size int64, optionsKind int32, optionsDevice int32) { + cfilename := C.CString(filename) + filenameLen := len(filename) + cfilenameLen := *(*C.int)(unsafe.Pointer(&filenameLen)) + cshared := *(*C.int)(unsafe.Pointer(&shared)) + csize := *(*C.int64_t)(unsafe.Pointer(&size)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_from_file(ptr, cfilename, cfilenameLen, cshared, csize, coptionsKind, coptionsDevice) +} +func AtgFull(ptr *Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_full(ptr, csizeDataPtr, csizeLen, fillValue, coptionsKind, coptionsDevice) +} +func AtgFullLike(ptr *Ctensor, self Ctensor, fillValue Cscalar) { + C.atg_full_like(ptr, self, fillValue) +} +func AtgFullOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_full_out(ptr, out, csizeDataPtr, csizeLen, fillValue) +} +func AtgGather(ptr *Ctensor, self Ctensor, dim 
int64, index Ctensor, sparseGrad int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) + C.atg_gather(ptr, self, cdim, index, csparseGrad) +} +func AtgGatherOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) + C.atg_gather_out(ptr, out, self, cdim, index, csparseGrad) +} +func AtgGe(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_ge(ptr, self, other) +} +func AtgGe1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_ge1(ptr, self, other) +} +func AtgGe_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_ge_(ptr, self, other) +} +func AtgGe1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_ge_1(ptr, self, other) +} +func AtgGeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_ge_out(ptr, out, self, other) +} +func AtgGeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_ge_out1(ptr, out, self, other) +} +func AtgGelu(ptr *Ctensor, self Ctensor) { + C.atg_gelu(ptr, self) +} +func AtgGeluBackward(ptr *Ctensor, grad Ctensor, self Ctensor) { + C.atg_gelu_backward(ptr, grad, self) +} +func AtgGeometric_(ptr *Ctensor, self Ctensor, p float64) { + cp := *(*C.double)(unsafe.Pointer(&p)) + C.atg_geometric_(ptr, self, cp) +} +func AtgGeqrf(ptr *Ctensor, self Ctensor) { + C.atg_geqrf(ptr, self) +} +func AtgGeqrfOut(ptr *Ctensor, a Ctensor, tau Ctensor, self Ctensor) { + C.atg_geqrf_out(ptr, a, tau, self) +} +func AtgGer(ptr *Ctensor, self Ctensor, vec2 Ctensor) { + C.atg_ger(ptr, self, vec2) +} +func AtgGerOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor) { + C.atg_ger_out(ptr, out, self, vec2) +} +func AtgGlu(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_glu(ptr, self, cdim) +} +func AtgGluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_glu_backward(ptr, gradOutput, self, cdim) +} +func AtgGluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_glu_backward_out(ptr, gradInput, gradOutput, self, cdim) +} +func AtgGluOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_glu_out(ptr, out, self, cdim) +} +func AtgGrad(ptr *Ctensor, self Ctensor) { + C.atg_grad(ptr, self) +} +func AtgGridSampler(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { + cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) + cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + C.atg_grid_sampler(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler2d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { + cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) + cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + C.atg_grid_sampler_2d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler2dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { + cinterpolationMode := 
*(*C.int64_t)(unsafe.Pointer(&interpolationMode))
+	cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
+	calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
+	C.atg_grid_sampler_2d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
+}
+func AtgGridSampler3d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) {
+	cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))
+	cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
+	calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
+	C.atg_grid_sampler_3d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
+}
+func AtgGridSampler3dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) {
+	cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode))
+	cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode))
+	calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners))
+	C.atg_grid_sampler_3d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners)
+}
+func AtgGroupNorm(ptr *Ctensor, input Ctensor, numGroups int64, weight Ctensor, bias Ctensor, eps float64, cudnnEnabled int32) {
+	cnumGroups := *(*C.int64_t)(unsafe.Pointer(&numGroups))
+	ceps := *(*C.double)(unsafe.Pointer(&eps))
+	ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))
+	C.atg_group_norm(ptr, input, cnumGroups, weight, bias, ceps, ccudnnEnabled)
+}
+func AtgGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) {
+	cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+	cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+	chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+	cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+	cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+	ctrain := *(*C.int)(unsafe.Pointer(&train))
+	cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+	cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+	C.atg_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
+}
+func AtgGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) {
+	cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+	cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+	chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+	cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+	cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+	ctrain := *(*C.int)(unsafe.Pointer(&train))
+	cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+	C.atg_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
+}
+func AtgGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) {
+	C.atg_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
+}
+func AtgGt(ptr *Ctensor, self Ctensor, other Cscalar) {
+	C.atg_gt(ptr, self, other)
+}
+func AtgGt1(ptr *Ctensor, self Ctensor, other Ctensor) {
+	C.atg_gt1(ptr, self, other)
+}
+func AtgGt_(ptr *Ctensor, self Ctensor, other Cscalar) {
+	C.atg_gt_(ptr, self, other)
+}
+func AtgGt1_(ptr *Ctensor, self Ctensor, other Ctensor) {
+	C.atg_gt_1(ptr, self, 
other) +} +func AtgGtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_gt_out(ptr, out, self, other) +} +func AtgGtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_gt_out1(ptr, out, self, other) +} +func AtgHammingWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_hamming_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgHammingWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_hamming_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgHammingWindow2(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) + calpha := *(*C.double)(unsafe.Pointer(&alpha)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_hamming_window2(ptr, cwindowLength, cperiodic, calpha, coptionsKind, coptionsDevice) +} +func AtgHammingWindow3(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, beta float64, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) + calpha := *(*C.double)(unsafe.Pointer(&alpha)) + cbeta := *(*C.double)(unsafe.Pointer(&beta)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_hamming_window3(ptr, cwindowLength, cperiodic, calpha, cbeta, coptionsKind, coptionsDevice) +} +func AtgHannWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_hann_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgHannWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { + cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) + cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_hann_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgHardshrink(ptr *Ctensor, self Ctensor) { + C.atg_hardshrink(ptr, self) +} +func AtgHardshrinkBackward(ptr *Ctensor, gradOut Ctensor, self Ctensor, lambd Cscalar) { + C.atg_hardshrink_backward(ptr, gradOut, self, lambd) +} +func AtgHardsigmoid(ptr *Ctensor, self Ctensor) { + C.atg_hardsigmoid(ptr, self) +} +func AtgHardsigmoid_(ptr *Ctensor, self Ctensor) { + C.atg_hardsigmoid_(ptr, self) +} +func AtgHardsigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor) { + C.atg_hardsigmoid_backward(ptr, gradOutput, self) +} +func AtgHardsigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_hardsigmoid_out(ptr, 
out, self) +} +func AtgHardtanh(ptr *Ctensor, self Ctensor) { + C.atg_hardtanh(ptr, self) +} +func AtgHardtanh_(ptr *Ctensor, self Ctensor) { + C.atg_hardtanh_(ptr, self) +} +func AtgHardtanhBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar) { + C.atg_hardtanh_backward(ptr, gradOutput, self, minVal, maxVal) +} +func AtgHardtanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar) { + C.atg_hardtanh_backward_out(ptr, gradInput, gradOutput, self, minVal, maxVal) +} +func AtgHardtanhOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_hardtanh_out(ptr, out, self) +} +func AtgHingeEmbeddingLoss(ptr *Ctensor, self Ctensor, target Ctensor, margin float64, reduction int64) { + cmargin := *(*C.double)(unsafe.Pointer(&margin)) + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_hinge_embedding_loss(ptr, self, target, cmargin, creduction) +} +func AtgHistc(ptr *Ctensor, self Ctensor, bins int64) { + cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) + C.atg_histc(ptr, self, cbins) +} +func AtgHistcOut(ptr *Ctensor, out Ctensor, self Ctensor, bins int64) { + cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) + C.atg_histc_out(ptr, out, self, cbins) +} +func AtgHspmm(ptr *Ctensor, mat1 Ctensor, mat2 Ctensor) { + C.atg_hspmm(ptr, mat1, mat2) +} +func AtgHspmmOut(ptr *Ctensor, out Ctensor, mat1 Ctensor, mat2 Ctensor) { + C.atg_hspmm_out(ptr, out, mat1, mat2) +} +func AtgIfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32) { + csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) + cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) + C.atg_ifft(ptr, self, csignalNdim, cnormalized) +} +func AtgIm2col(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_im2col(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colBackward(ptr *Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_im2col_backward(ptr, gradOutput, cinputSizeDataPtr, cinputSizeLen, 
ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_im2col_backward_out(ptr, gradInput, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + C.atg_im2col_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgImag(ptr *Ctensor, self Ctensor) { + C.atg_imag(ptr, self) +} +func AtgIndex(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int) { + cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) + cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) + C.atg_index(ptr, self, cindicesDataPtr, cindicesLen) +} +func AtgIndexAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_add(ptr, self, cdim, index, source) +} +func AtgIndexAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_add_(ptr, self, cdim, index, source) +} +func AtgIndexCopy(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_copy(ptr, self, cdim, index, source) +} +func AtgIndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_copy_(ptr, self, cdim, index, source) +} +func AtgIndexFill(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_fill(ptr, self, cdim, index, value) +} +func AtgIndexFill1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor) { + cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_fill1(ptr, self, cdim, index, value) +} +func AtgIndexFill_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_fill_(ptr, self, cdim, index, value) +} +func AtgIndexFill1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_fill_1(ptr, self, cdim, index, value) +} +func AtgIndexPut(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32) { + cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) + cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) + caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) + C.atg_index_put(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) +} +func AtgIndexPut_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32) { + cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) + cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) + caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) + C.atg_index_put_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) +} +func AtgIndexSelect(ptr *Ctensor, self Ctensor, dim int64, index Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_select(ptr, self, cdim, index) +} +func AtgIndexSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_index_select_out(ptr, out, self, cdim, index) +} +func AtgIndices(ptr *Ctensor, self Ctensor) { + C.atg_indices(ptr, self) +} +func AtgInstanceNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, useInputStats int32, momentum float64, eps float64, cudnnEnabled int32) { + cuseInputStats := *(*C.int)(unsafe.Pointer(&useInputStats)) + cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) + C.atg_instance_norm(ptr, input, weight, bias, runningMean, runningVar, cuseInputStats, cmomentum, ceps, ccudnnEnabled) +} +func AtgIntRepr(ptr *Ctensor, self Ctensor) { + C.atg_int_repr(ptr, self) +} +func AtgInverse(ptr *Ctensor, self Ctensor) { + C.atg_inverse(ptr, self) +} +func AtgInverseOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_inverse_out(ptr, out, self) +} +func AtgIrfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32, signalSizesData []int64, signalSizesLen int) { + csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) + cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) + conesided := *(*C.int)(unsafe.Pointer(&onesided)) + csignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&signalSizesData[0])) + csignalSizesLen := *(*C.int)(unsafe.Pointer(&signalSizesLen)) + C.atg_irfft(ptr, self, csignalNdim, cnormalized, conesided, csignalSizesDataPtr, csignalSizesLen) +} +func AtgIsclose(ptr *Ctensor, self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32) { + crtol := *(*C.double)(unsafe.Pointer(&rtol)) + catol := *(*C.double)(unsafe.Pointer(&atol)) + cequalNan := *(*C.int)(unsafe.Pointer(&equalNan)) + C.atg_isclose(ptr, self, other, crtol, catol, cequalNan) +} +func AtgIsfinite(ptr *Ctensor, self Ctensor) { + C.atg_isfinite(ptr, self) } -func AtgIsinf(ptr *Ctensor, self Ctensor){ -C.atg_isinf(ptr, self) +func AtgIsinf(ptr *Ctensor, self Ctensor) { + 
C.atg_isinf(ptr, self) } -func AtgIsnan(ptr *Ctensor, self Ctensor){ -C.atg_isnan(ptr, self) +func AtgIsnan(ptr *Ctensor, self Ctensor) { + C.atg_isnan(ptr, self) } -func AtgKlDiv(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_kl_div(ptr, self, target, creduction) +func AtgKlDiv(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_kl_div(ptr, self, target, creduction) } -func AtgKlDivBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_kl_div_backward(ptr, gradOutput, self, target, creduction) +func AtgKlDivBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_kl_div_backward(ptr, gradOutput, self, target, creduction) } -func AtgKthvalue(ptr *Ctensor, self Ctensor, k int64, dim int64, keepdim int32){ -ck := *(*C.int64_t)(unsafe.Pointer(&k)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_kthvalue(ptr, self, ck, cdim, ckeepdim) +func AtgKthvalue(ptr *Ctensor, self Ctensor, k int64, dim int64, keepdim int32) { + ck := *(*C.int64_t)(unsafe.Pointer(&k)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_kthvalue(ptr, self, ck, cdim, ckeepdim) } -func AtgKthvalueOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, keepdim int32){ -ck := *(*C.int64_t)(unsafe.Pointer(&k)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_kthvalue_out(ptr, values, indices, self, ck, cdim, ckeepdim) +func AtgKthvalueOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, keepdim int32) { + ck := *(*C.int64_t)(unsafe.Pointer(&k)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_kthvalue_out(ptr, values, indices, self, ck, cdim, ckeepdim) } -func AtgL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_l1_loss(ptr, self, target, creduction) +func AtgL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_l1_loss(ptr, self, target, creduction) } -func AtgL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_l1_loss_backward(ptr, gradOutput, self, target, creduction) +func AtgL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_l1_loss_backward(ptr, gradOutput, self, target, creduction) } -func AtgL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +func AtgL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) } -func 
AtgL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_l1_loss_out(ptr, out, self, target, creduction) +func AtgL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_l1_loss_out(ptr, out, self, target, creduction) } -func AtgLayerNorm(ptr *Ctensor, input Ctensor, normalizedShapeData []int64, normalizedShapeLen int, weight Ctensor, bias Ctensor, eps float64, cudnnEnable int32){ -cnormalizedShapeDataPtr := (*C.int64_t)(unsafe.Pointer(&normalizedShapeData[0])) -cnormalizedShapeLen := *(*C.int)(unsafe.Pointer(&normalizedShapeLen)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable)) -C.atg_layer_norm(ptr, input, cnormalizedShapeDataPtr, cnormalizedShapeLen, weight, bias, ceps, ccudnnEnable) +func AtgLayerNorm(ptr *Ctensor, input Ctensor, normalizedShapeData []int64, normalizedShapeLen int, weight Ctensor, bias Ctensor, eps float64, cudnnEnable int32) { + cnormalizedShapeDataPtr := (*C.int64_t)(unsafe.Pointer(&normalizedShapeData[0])) + cnormalizedShapeLen := *(*C.int)(unsafe.Pointer(&normalizedShapeLen)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable)) + C.atg_layer_norm(ptr, input, cnormalizedShapeDataPtr, cnormalizedShapeLen, weight, bias, ceps, ccudnnEnable) } -func AtgLe(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_le(ptr, self, other ) +func AtgLe(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_le(ptr, self, other) } -func AtgLe1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_le1(ptr, self, other) +func AtgLe1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_le1(ptr, self, other) } -func AtgLe_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_le_(ptr, self, other ) +func AtgLe_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_le_(ptr, self, other) } -func AtgLe1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_le_1(ptr, self, other) +func AtgLe1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_le_1(ptr, self, other) } -func AtgLeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_le_out(ptr, out, self, other ) +func AtgLeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_le_out(ptr, out, self, other) } -func AtgLeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_le_out1(ptr, out, self, other) +func AtgLeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_le_out1(ptr, out, self, other) } -func AtgLeakyRelu(ptr *Ctensor, self Ctensor){ -C.atg_leaky_relu(ptr, self) +func AtgLeakyRelu(ptr *Ctensor, self Ctensor) { + C.atg_leaky_relu(ptr, self) } -func AtgLeakyRelu_(ptr *Ctensor, self Ctensor){ -C.atg_leaky_relu_(ptr, self) +func AtgLeakyRelu_(ptr *Ctensor, self Ctensor) { + C.atg_leaky_relu_(ptr, self) } -func AtgLeakyReluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, negativeSlope Cscalar, selfIsResult int32){ -cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) -C.atg_leaky_relu_backward(ptr, gradOutput, self, negativeSlope , cselfIsResult) +func AtgLeakyReluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, negativeSlope Cscalar, selfIsResult int32) { + cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) + C.atg_leaky_relu_backward(ptr, gradOutput, self, negativeSlope, cselfIsResult) } -func AtgLeakyReluOut(ptr *Ctensor, out Ctensor, self Ctensor){ 
-C.atg_leaky_relu_out(ptr, out, self) +func AtgLeakyReluOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_leaky_relu_out(ptr, out, self) } -func AtgLerp(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){ -C.atg_lerp(ptr, self, end, weight ) +func AtgLerp(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar) { + C.atg_lerp(ptr, self, end, weight) } -func AtgLerp1(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){ -C.atg_lerp1(ptr, self, end, weight) +func AtgLerp1(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor) { + C.atg_lerp1(ptr, self, end, weight) } -func AtgLerp_(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){ -C.atg_lerp_(ptr, self, end, weight ) +func AtgLerp_(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar) { + C.atg_lerp_(ptr, self, end, weight) } -func AtgLerp1_(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){ -C.atg_lerp_1(ptr, self, end, weight) +func AtgLerp1_(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor) { + C.atg_lerp_1(ptr, self, end, weight) } -func AtgLerpOut(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Cscalar){ -C.atg_lerp_out(ptr, out, self, end, weight ) +func AtgLerpOut(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Cscalar) { + C.atg_lerp_out(ptr, out, self, end, weight) } -func AtgLerpOut1(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Ctensor){ -C.atg_lerp_out1(ptr, out, self, end, weight) +func AtgLerpOut1(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Ctensor) { + C.atg_lerp_out1(ptr, out, self, end, weight) } -func AtgLgamma(ptr *Ctensor, self Ctensor){ -C.atg_lgamma(ptr, self) +func AtgLgamma(ptr *Ctensor, self Ctensor) { + C.atg_lgamma(ptr, self) } -func AtgLgamma_(ptr *Ctensor, self Ctensor){ -C.atg_lgamma_(ptr, self) +func AtgLgamma_(ptr *Ctensor, self Ctensor) { + C.atg_lgamma_(ptr, self) } -func AtgLgammaOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_lgamma_out(ptr, out, self) +func AtgLgammaOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_lgamma_out(ptr, out, self) } -func AtgLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ -C.atg_linear(ptr, input, weight, bias) +func AtgLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor) { + C.atg_linear(ptr, input, weight, bias) } -func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, optionsKind int32, optionsDevice int32){ -csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_linspace(ptr, start , end , csteps, coptionsKind, coptionsDevice) +func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, optionsKind int32, optionsDevice int32) { + csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_linspace(ptr, start, end, csteps, coptionsKind, coptionsDevice) } -func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64){ -csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) -C.atg_linspace_out(ptr, out, start , end , csteps) +func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64) { + csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) + C.atg_linspace_out(ptr, out, start, end, csteps) } -func AtgLog(ptr *Ctensor, self Ctensor){ -C.atg_log(ptr, self) +func AtgLog(ptr *Ctensor, self Ctensor) { + C.atg_log(ptr, self) } -func 
AtgLog10(ptr *Ctensor, self Ctensor){ -C.atg_log10(ptr, self) +func AtgLog10(ptr *Ctensor, self Ctensor) { + C.atg_log10(ptr, self) } -func AtgLog10_(ptr *Ctensor, self Ctensor){ -C.atg_log10_(ptr, self) +func AtgLog10_(ptr *Ctensor, self Ctensor) { + C.atg_log10_(ptr, self) } -func AtgLog10Out(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_log10_out(ptr, out, self) +func AtgLog10Out(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_log10_out(ptr, out, self) } -func AtgLog1p(ptr *Ctensor, self Ctensor){ -C.atg_log1p(ptr, self) +func AtgLog1p(ptr *Ctensor, self Ctensor) { + C.atg_log1p(ptr, self) } -func AtgLog1p_(ptr *Ctensor, self Ctensor){ -C.atg_log1p_(ptr, self) +func AtgLog1p_(ptr *Ctensor, self Ctensor) { + C.atg_log1p_(ptr, self) } -func AtgLog1pOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_log1p_out(ptr, out, self) +func AtgLog1pOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_log1p_out(ptr, out, self) } -func AtgLog2(ptr *Ctensor, self Ctensor){ -C.atg_log2(ptr, self) +func AtgLog2(ptr *Ctensor, self Ctensor) { + C.atg_log2(ptr, self) } -func AtgLog2_(ptr *Ctensor, self Ctensor){ -C.atg_log2_(ptr, self) +func AtgLog2_(ptr *Ctensor, self Ctensor) { + C.atg_log2_(ptr, self) } -func AtgLog2Out(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_log2_out(ptr, out, self) +func AtgLog2Out(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_log2_out(ptr, out, self) } -func AtgLog_(ptr *Ctensor, self Ctensor){ -C.atg_log_(ptr, self) +func AtgLog_(ptr *Ctensor, self Ctensor) { + C.atg_log_(ptr, self) } -func AtgLogNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){ -cmean := *(*C.double)(unsafe.Pointer(&mean)) -cstd := *(*C.double)(unsafe.Pointer(&std)) -C.atg_log_normal_(ptr, self, cmean, cstd) +func AtgLogNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64) { + cmean := *(*C.double)(unsafe.Pointer(&mean)) + cstd := *(*C.double)(unsafe.Pointer(&std)) + C.atg_log_normal_(ptr, self, cmean, cstd) } -func AtgLogOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_log_out(ptr, out, self) +func AtgLogOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_log_out(ptr, out, self) } -func AtgLogSigmoid(ptr *Ctensor, self Ctensor){ -C.atg_log_sigmoid(ptr, self) +func AtgLogSigmoid(ptr *Ctensor, self Ctensor) { + C.atg_log_sigmoid(ptr, self) } -func AtgLogSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){ -C.atg_log_sigmoid_backward(ptr, gradOutput, self, buffer) +func AtgLogSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor) { + C.atg_log_sigmoid_backward(ptr, gradOutput, self, buffer) } -func AtgLogSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){ -C.atg_log_sigmoid_backward_out(ptr, gradInput, gradOutput, self, buffer) +func AtgLogSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor) { + C.atg_log_sigmoid_backward_out(ptr, gradInput, gradOutput, self, buffer) } -func AtgLogSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_log_sigmoid_out(ptr, out, self) +func AtgLogSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_log_sigmoid_out(ptr, out, self) } -func AtgLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_log_softmax(ptr, self, cdim, cdtype) +func AtgLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdtype := 
*(*C.int)(unsafe.Pointer(&dtype)) + C.atg_log_softmax(ptr, self, cdim, cdtype) } -func AtgLogdet(ptr *Ctensor, self Ctensor){ -C.atg_logdet(ptr, self) +func AtgLogdet(ptr *Ctensor, self Ctensor) { + C.atg_logdet(ptr, self) } -func AtgLogicalAnd(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_and(ptr, self, other) +func AtgLogicalAnd(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_and(ptr, self, other) } -func AtgLogicalAnd_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_and_(ptr, self, other) +func AtgLogicalAnd_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_and_(ptr, self, other) } -func AtgLogicalAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_and_out(ptr, out, self, other) +func AtgLogicalAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_and_out(ptr, out, self, other) } -func AtgLogicalNot(ptr *Ctensor, self Ctensor){ -C.atg_logical_not(ptr, self) +func AtgLogicalNot(ptr *Ctensor, self Ctensor) { + C.atg_logical_not(ptr, self) } -func AtgLogicalNot_(ptr *Ctensor, self Ctensor){ -C.atg_logical_not_(ptr, self) +func AtgLogicalNot_(ptr *Ctensor, self Ctensor) { + C.atg_logical_not_(ptr, self) } -func AtgLogicalNotOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_logical_not_out(ptr, out, self) +func AtgLogicalNotOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_logical_not_out(ptr, out, self) } -func AtgLogicalOr(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_or(ptr, self, other) +func AtgLogicalOr(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_or(ptr, self, other) } -func AtgLogicalOr_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_or_(ptr, self, other) +func AtgLogicalOr_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_or_(ptr, self, other) } -func AtgLogicalOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_or_out(ptr, out, self, other) +func AtgLogicalOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_or_out(ptr, out, self, other) } -func AtgLogicalXor(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_xor(ptr, self, other) +func AtgLogicalXor(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_xor(ptr, self, other) } -func AtgLogicalXor_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_xor_(ptr, self, other) +func AtgLogicalXor_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_xor_(ptr, self, other) } -func AtgLogicalXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_logical_xor_out(ptr, out, self, other) +func AtgLogicalXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_logical_xor_out(ptr, out, self, other) } -func AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, base float64, optionsKind int32, optionsDevice int32){ -csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) -cbase := *(*C.double)(unsafe.Pointer(&base)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_logspace(ptr, start , end , csteps, cbase, coptionsKind, coptionsDevice) +func AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, base float64, optionsKind int32, optionsDevice int32) { + csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) + cbase := *(*C.double)(unsafe.Pointer(&base)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + 
C.atg_logspace(ptr, start, end, csteps, cbase, coptionsKind, coptionsDevice) } -func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64, base float64){ -csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) -cbase := *(*C.double)(unsafe.Pointer(&base)) -C.atg_logspace_out(ptr, out, start , end , csteps, cbase) +func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64, base float64) { + csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) + cbase := *(*C.double)(unsafe.Pointer(&base)) + C.atg_logspace_out(ptr, out, start, end, csteps, cbase) } -func AtgLogsumexp(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_logsumexp(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +func AtgLogsumexp(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_logsumexp(ptr, self, cdimDataPtr, cdimLen, ckeepdim) } -func AtgLogsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_logsumexp_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +func AtgLogsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_logsumexp_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) } -func AtgLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ -chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) -chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -C.atg_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) +func AtgLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { + chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) + chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + C.atg_lstm(ptr, input, 
chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) } -func AtgLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){ -chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) -chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -C.atg_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) +func AtgLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { + chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) + chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + C.atg_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) } -func AtgLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){ -chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) -chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) -C.atg_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh) +func AtgLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) { + chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) + chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) + C.atg_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh) } -func AtgLstsq(ptr *Ctensor, self Ctensor, a Ctensor){ -C.atg_lstsq(ptr, self, a) +func AtgLstsq(ptr *Ctensor, self Ctensor, a Ctensor) { + C.atg_lstsq(ptr, self, a) } -func AtgLstsqOut(ptr *Ctensor, x Ctensor, qr Ctensor, self Ctensor, a Ctensor){ -C.atg_lstsq_out(ptr, x, qr, self, a) +func AtgLstsqOut(ptr *Ctensor, x Ctensor, qr Ctensor, self Ctensor, a Ctensor) { + C.atg_lstsq_out(ptr, x, qr, self, a) } -func AtgLt(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_lt(ptr, self, other ) +func AtgLt(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_lt(ptr, self, other) } -func AtgLt1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_lt1(ptr, self, other) -} -func AtgLt_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_lt_(ptr, self, other ) -} -func AtgLt1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_lt_1(ptr, self, other) -} -func AtgLtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_lt_out(ptr, out, self, other ) -} -func AtgLtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_lt_out1(ptr, out, self, other) -} -func AtgLuSolve(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots 
Ctensor){ -C.atg_lu_solve(ptr, self, lUData, lUPivots) -} -func AtgLuSolveOut(ptr *Ctensor, out Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ -C.atg_lu_solve_out(ptr, out, self, lUData, lUPivots) -} -func AtgMarginRankingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){ -cmargin := *(*C.double)(unsafe.Pointer(&margin)) -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_margin_ranking_loss(ptr, input1, input2, target, cmargin, creduction) -} -func AtgMaskedFill(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){ -C.atg_masked_fill(ptr, self, mask, value ) -} -func AtgMaskedFill1(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){ -C.atg_masked_fill1(ptr, self, mask, value) -} -func AtgMaskedFill_(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){ -C.atg_masked_fill_(ptr, self, mask, value ) -} -func AtgMaskedFill1_(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){ -C.atg_masked_fill_1(ptr, self, mask, value) -} -func AtgMaskedScatter(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){ -C.atg_masked_scatter(ptr, self, mask, source) -} -func AtgMaskedScatter_(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){ -C.atg_masked_scatter_(ptr, self, mask, source) -} -func AtgMaskedSelect(ptr *Ctensor, self Ctensor, mask Ctensor){ -C.atg_masked_select(ptr, self, mask) -} -func AtgMaskedSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, mask Ctensor){ -C.atg_masked_select_out(ptr, out, self, mask) -} -func AtgMatmul(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_matmul(ptr, self, other) -} -func AtgMatmulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_matmul_out(ptr, out, self, other) -} -func AtgMatrixPower(ptr *Ctensor, self Ctensor, n int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -C.atg_matrix_power(ptr, self, cn) -} -func AtgMatrixRank(ptr *Ctensor, self Ctensor, symmetric int32){ -csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) -C.atg_matrix_rank(ptr, self, csymmetric) -} -func AtgMatrixRank1(ptr *Ctensor, self Ctensor, tol float64, symmetric int32){ -ctol := *(*C.double)(unsafe.Pointer(&tol)) -csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) -C.atg_matrix_rank1(ptr, self, ctol, csymmetric) -} -func AtgMax(ptr *Ctensor, self Ctensor){ -C.atg_max(ptr, self) -} -func AtgMax1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_max1(ptr, self, other) -} -func AtgMax2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_max2(ptr, self, cdim, ckeepdim) -} -func AtgMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_max_out(ptr, out, self, other) -} -func AtgMaxOut1(ptr *Ctensor, max Ctensor, maxValues Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_max_out1(ptr, max, maxValues, self, cdim, ckeepdim) -} -func AtgMaxPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) 
-cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool1dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool1d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool2dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool2d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool2dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ -ckernelSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool2d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool2dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool2d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool2dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool2d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := 
*(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool3dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool3d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool3dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool3d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool3dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool3d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool3dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_max_pool3d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxUnpool2d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_max_unpool2d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_max_unpool2d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_max_unpool2d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_max_unpool2d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool3d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_max_unpool3d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxUnpool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_max_unpool3d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, 
cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxUnpool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_max_unpool3d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxUnpool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_max_unpool3d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_max_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgMean(ptr *Ctensor, self Ctensor, dtype int32){ -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_mean(ptr, self, cdtype) -} -func AtgMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_mean1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgMeanOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_mean_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgMedian(ptr *Ctensor, self Ctensor){ -C.atg_median(ptr, self) -} -func AtgMedian1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_median1(ptr, self, cdim, ckeepdim) -} -func AtgMedianOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_median_out(ptr, values, indices, self, cdim, ckeepdim) +func AtgLt1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_lt1(ptr, self, other) +} +func AtgLt_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_lt_(ptr, self, other) +} +func AtgLt1_(ptr *Ctensor, self Ctensor, other Ctensor) { + 
C.atg_lt_1(ptr, self, other) +} +func AtgLtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_lt_out(ptr, out, self, other) +} +func AtgLtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_lt_out1(ptr, out, self, other) +} +func AtgLuSolve(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor) { + C.atg_lu_solve(ptr, self, lUData, lUPivots) +} +func AtgLuSolveOut(ptr *Ctensor, out Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor) { + C.atg_lu_solve_out(ptr, out, self, lUData, lUPivots) +} +func AtgMarginRankingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64) { + cmargin := *(*C.double)(unsafe.Pointer(&margin)) + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_margin_ranking_loss(ptr, input1, input2, target, cmargin, creduction) +} +func AtgMaskedFill(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar) { + C.atg_masked_fill(ptr, self, mask, value) +} +func AtgMaskedFill1(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor) { + C.atg_masked_fill1(ptr, self, mask, value) +} +func AtgMaskedFill_(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar) { + C.atg_masked_fill_(ptr, self, mask, value) +} +func AtgMaskedFill1_(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor) { + C.atg_masked_fill_1(ptr, self, mask, value) +} +func AtgMaskedScatter(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor) { + C.atg_masked_scatter(ptr, self, mask, source) +} +func AtgMaskedScatter_(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor) { + C.atg_masked_scatter_(ptr, self, mask, source) +} +func AtgMaskedSelect(ptr *Ctensor, self Ctensor, mask Ctensor) { + C.atg_masked_select(ptr, self, mask) +} +func AtgMaskedSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, mask Ctensor) { + C.atg_masked_select_out(ptr, out, self, mask) +} +func AtgMatmul(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_matmul(ptr, self, other) +} +func AtgMatmulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_matmul_out(ptr, out, self, other) +} +func AtgMatrixPower(ptr *Ctensor, self Ctensor, n int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + C.atg_matrix_power(ptr, self, cn) +} +func AtgMatrixRank(ptr *Ctensor, self Ctensor, symmetric int32) { + csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) + C.atg_matrix_rank(ptr, self, csymmetric) +} +func AtgMatrixRank1(ptr *Ctensor, self Ctensor, tol float64, symmetric int32) { + ctol := *(*C.double)(unsafe.Pointer(&tol)) + csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) + C.atg_matrix_rank1(ptr, self, ctol, csymmetric) +} +func AtgMax(ptr *Ctensor, self Ctensor) { + C.atg_max(ptr, self) +} +func AtgMax1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_max1(ptr, self, other) +} +func AtgMax2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_max2(ptr, self, cdim, ckeepdim) +} +func AtgMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_max_out(ptr, out, self, other) +} +func AtgMaxOut1(ptr *Ctensor, max Ctensor, maxValues Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_max_out1(ptr, max, maxValues, self, cdim, ckeepdim) +} +func AtgMaxPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen 
int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool1dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool1d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool2d_with_indices(ptr, self, ckernelSizeDataPtr, 
ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool2d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool2dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool2d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool2dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool2d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool3d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool3d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool3dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := 
*(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool3d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool3dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_max_pool3d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxUnpool2d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_max_unpool2d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_max_unpool2d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_max_unpool2d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_max_unpool2d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool3d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_max_unpool3d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, 
outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_max_unpool3d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_max_unpool3d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_max_unpool3d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_max_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgMean(ptr *Ctensor, self Ctensor, dtype int32) { + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_mean(ptr, self, cdtype) +} +func AtgMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_mean1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgMeanOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_mean_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgMedian(ptr *Ctensor, self Ctensor) { + C.atg_median(ptr, self) +} +func AtgMedian1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_median1(ptr, self, cdim, ckeepdim) +} +func AtgMedianOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_median_out(ptr, values, indices, self, cdim, ckeepdim) } -func AtgMin(ptr *Ctensor, self Ctensor){ -C.atg_min(ptr, self) -} -func AtgMin1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_min1(ptr, self, other) -} -func AtgMin2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_min2(ptr, self, cdim, ckeepdim) -} -func AtgMinOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_min_out(ptr, out, self, other) -} -func AtgMinOut1(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_min_out1(ptr, min, minIndices, self, cdim, ckeepdim) -} -func AtgMinValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_min_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgMiopenBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) -cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) -C.atg_miopen_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) -} -func AtgMiopenBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64){ -cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) -C.atg_miopen_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon) -} -func AtgMiopenConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionBackwardBias(ptr *Ctensor, gradOutput Ctensor){ -C.atg_miopen_convolution_backward_bias(ptr, gradOutput) -} -func AtgMiopenConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, 
paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_convolution_transpose(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, 
paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenDepthwiseConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_depthwise_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenDepthwiseConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_depthwise_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenDepthwiseConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) -cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) -C.atg_miopen_depthwise_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){ -cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) -cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) -cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) -cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) -chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) -cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) -C.atg_miopen_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) -} -func AtgMkldnnAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -C.atg_mkldnn_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMkldnnConvolution(ptr *Ctensor, self Ctensor, weight 
Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -C.atg_mkldnn_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgMkldnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ -cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) -cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) -C.atg_mkldnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) -} -func AtgMkldnnConvolutionBackwardWeights(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ -cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) -cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) -C.atg_mkldnn_convolution_backward_weights(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) -} -func AtgMkldnnLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ -C.atg_mkldnn_linear(ptr, input, weight, bias) -} -func AtgMkldnnMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_mkldnn_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMkldnnReorderConv2dWeight(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) -C.atg_mkldnn_reorder_conv2d_weight(ptr, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgMm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ -C.atg_mm(ptr, self, mat2) -} -func AtgMmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){ -C.atg_mm_out(ptr, out, self, mat2) -} -func AtgMode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_mode(ptr, self, cdim, ckeepdim) -} -func AtgModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_mode_out(ptr, values, indices, self, cdim, ckeepdim) -} -func AtgMseLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_mse_loss(ptr, self, target, creduction) -} -func AtgMseLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_mse_loss_backward(ptr, gradOutput, self, target, creduction) -} -func AtgMseLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_mse_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) -} -func AtgMseLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_mse_loss_out(ptr, out, self, target, creduction) -} -func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_mul(ptr, self, other) -} -func AtgMul1(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_mul1(ptr, self, other ) -} -func AtgMul_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_mul_(ptr, self, other) -} -func AtgMul1_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_mul_1(ptr, self, other ) -} -func AtgMulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_mul_out(ptr, out, self, other) -} -func AtgMultiMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_multi_margin_loss_backward(ptr, gradOutput, self, target, p , margin , weight, 
creduction) -} -func AtgMultiMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_multi_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, p , margin , weight, creduction) -} -func AtgMultilabelMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_multilabel_margin_loss(ptr, self, target, creduction) -} -func AtgMultilabelMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_multilabel_margin_loss_backward(ptr, gradOutput, self, target, creduction, isTarget) -} -func AtgMultilabelMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_multilabel_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction, isTarget) -} -func AtgMultilabelMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_multilabel_margin_loss_out(ptr, out, self, target, creduction) -} -func AtgMultinomial(ptr *Ctensor, self Ctensor, numSamples int64, replacement int32){ -cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) -creplacement := *(*C.int)(unsafe.Pointer(&replacement)) -C.atg_multinomial(ptr, self, cnumSamples, creplacement) -} -func AtgMultinomialOut(ptr *Ctensor, out Ctensor, self Ctensor, numSamples int64, replacement int32){ -cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) -creplacement := *(*C.int)(unsafe.Pointer(&replacement)) -C.atg_multinomial_out(ptr, out, self, cnumSamples, creplacement) -} -func AtgMv(ptr *Ctensor, self Ctensor, vec Ctensor){ -C.atg_mv(ptr, self, vec) -} -func AtgMvOut(ptr *Ctensor, out Ctensor, self Ctensor, vec Ctensor){ -C.atg_mv_out(ptr, out, self, vec) -} -func AtgMvlgamma(ptr *Ctensor, self Ctensor, p int64){ -cp := *(*C.int64_t)(unsafe.Pointer(&p)) -C.atg_mvlgamma(ptr, self, cp) -} -func AtgMvlgamma_(ptr *Ctensor, self Ctensor, p int64){ -cp := *(*C.int64_t)(unsafe.Pointer(&p)) -C.atg_mvlgamma_(ptr, self, cp) -} -func AtgNarrow(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cstart := *(*C.int64_t)(unsafe.Pointer(&start)) -clength := *(*C.int64_t)(unsafe.Pointer(&length)) -C.atg_narrow(ptr, self, cdim, cstart, clength) -} -func AtgNarrow1(ptr *Ctensor, self Ctensor, dim int64, start Ctensor, length int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -clength := *(*C.int64_t)(unsafe.Pointer(&length)) -C.atg_narrow1(ptr, self, cdim, start, clength) -} -func AtgNarrowCopy(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cstart := *(*C.int64_t)(unsafe.Pointer(&start)) -clength := *(*C.int64_t)(unsafe.Pointer(&length)) -C.atg_narrow_copy(ptr, self, cdim, cstart, clength) -} -func AtgNativeBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) -ceps := 
*(*C.double)(unsafe.Pointer(&eps)) -C.atg_native_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) -} -func AtgNativeBatchNormOut(ptr *Ctensor, out Ctensor, saveMean Ctensor, saveInvstd Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -C.atg_native_batch_norm_out(ptr, out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) -} -func AtgNativeLayerNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, m int64, n int64, eps float64){ -cm := *(*C.int64_t)(unsafe.Pointer(&m)) -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -C.atg_native_layer_norm(ptr, input, weight, bias, cm, cn, ceps) -} -func AtgNativeNorm(ptr *Ctensor, self Ctensor){ -C.atg_native_norm(ptr, self) -} -func AtgNe(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_ne(ptr, self, other ) -} -func AtgNe1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_ne1(ptr, self, other) -} -func AtgNe_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_ne_(ptr, self, other ) -} -func AtgNe1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_ne_1(ptr, self, other) -} -func AtgNeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_ne_out(ptr, out, self, other ) -} -func AtgNeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_ne_out1(ptr, out, self, other) -} -func AtgNeg(ptr *Ctensor, self Ctensor){ -C.atg_neg(ptr, self) -} -func AtgNeg_(ptr *Ctensor, self Ctensor){ -C.atg_neg_(ptr, self) -} -func AtgNegOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_neg_out(ptr, out, self) -} -func AtgNewEmpty(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_new_empty(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgNewFull(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_new_full(ptr, self, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice) -} -func AtgNewZeros(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_new_zeros(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgNllLoss(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss(ptr, self, target, weight, creduction, cignoreIndex) -} -func AtgNllLoss2d(ptr *Ctensor, self Ctensor, target Ctensor, weight 
Ctensor, reduction int64, ignoreIndex int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss2d(ptr, self, target, weight, creduction, cignoreIndex) -} -func AtgNllLoss2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss2d_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLoss2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss2d_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLoss2dOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss2d_out(ptr, out, self, target, weight, creduction, cignoreIndex) -} -func AtgNllLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) -C.atg_nll_loss_out(ptr, out, self, target, weight, creduction, cignoreIndex) -} -func AtgNonzero(ptr *Ctensor, self Ctensor){ -C.atg_nonzero(ptr, self) +func AtgMin(ptr *Ctensor, self Ctensor) { + C.atg_min(ptr, self) +} +func AtgMin1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_min1(ptr, self, other) +} +func AtgMin2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_min2(ptr, self, cdim, ckeepdim) +} +func AtgMinOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_min_out(ptr, out, self, other) +} +func AtgMinOut1(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_min_out1(ptr, min, minIndices, self, cdim, ckeepdim) +} +func AtgMinValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) 
+ ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_min_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgMiopenBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) + cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) + C.atg_miopen_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) +} +func AtgMiopenBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64) { + cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) + C.atg_miopen_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon) +} +func AtgMiopenConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionBackwardBias(ptr *Ctensor, gradOutput Ctensor) { + C.atg_miopen_convolution_backward_bias(ptr, gradOutput) +} +func AtgMiopenConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) + cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, 
benchmark int32, deterministic int32) { + cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) + cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_convolution_transpose(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cweightSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) + cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_depthwise_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) + cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_depthwise_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { + cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) + cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) + cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) + cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) + C.atg_miopen_depthwise_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor) { + cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) + cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) + cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) + cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) + chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) + cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) + C.atg_miopen_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) +} +func AtgMkldnnAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + C.atg_mkldnn_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMkldnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_mkldnn_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgMkldnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32) { + cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) + cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) + cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) + C.atg_mkldnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) +} +func AtgMkldnnConvolutionBackwardWeights(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32) { + cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) + cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) + C.atg_mkldnn_convolution_backward_weights(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) +} +func AtgMkldnnLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor) { + C.atg_mkldnn_linear(ptr, input, weight, bias) +} +func AtgMkldnnMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_mkldnn_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMkldnnReorderConv2dWeight(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) + C.atg_mkldnn_reorder_conv2d_weight(ptr, self, cpaddingDataPtr, 
cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgMm(ptr *Ctensor, self Ctensor, mat2 Ctensor) { + C.atg_mm(ptr, self, mat2) +} +func AtgMmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor) { + C.atg_mm_out(ptr, out, self, mat2) +} +func AtgMode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_mode(ptr, self, cdim, ckeepdim) +} +func AtgModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_mode_out(ptr, values, indices, self, cdim, ckeepdim) +} +func AtgMseLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_mse_loss(ptr, self, target, creduction) +} +func AtgMseLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_mse_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgMseLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_mse_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgMseLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_mse_loss_out(ptr, out, self, target, creduction) +} +func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_mul(ptr, self, other) +} +func AtgMul1(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_mul1(ptr, self, other) +} +func AtgMul_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_mul_(ptr, self, other) +} +func AtgMul1_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_mul_1(ptr, self, other) +} +func AtgMulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_mul_out(ptr, out, self, other) +} +func AtgMultiMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_multi_margin_loss_backward(ptr, gradOutput, self, target, p, margin, weight, creduction) +} +func AtgMultiMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_multi_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, p, margin, weight, creduction) +} +func AtgMultilabelMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_multilabel_margin_loss(ptr, self, target, creduction) +} +func AtgMultilabelMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_multilabel_margin_loss_backward(ptr, gradOutput, self, target, creduction, isTarget) +} +func AtgMultilabelMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor) { + creduction := 
*(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_multilabel_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction, isTarget) +} +func AtgMultilabelMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_multilabel_margin_loss_out(ptr, out, self, target, creduction) +} +func AtgMultinomial(ptr *Ctensor, self Ctensor, numSamples int64, replacement int32) { + cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) + creplacement := *(*C.int)(unsafe.Pointer(&replacement)) + C.atg_multinomial(ptr, self, cnumSamples, creplacement) +} +func AtgMultinomialOut(ptr *Ctensor, out Ctensor, self Ctensor, numSamples int64, replacement int32) { + cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) + creplacement := *(*C.int)(unsafe.Pointer(&replacement)) + C.atg_multinomial_out(ptr, out, self, cnumSamples, creplacement) +} +func AtgMv(ptr *Ctensor, self Ctensor, vec Ctensor) { + C.atg_mv(ptr, self, vec) +} +func AtgMvOut(ptr *Ctensor, out Ctensor, self Ctensor, vec Ctensor) { + C.atg_mv_out(ptr, out, self, vec) +} +func AtgMvlgamma(ptr *Ctensor, self Ctensor, p int64) { + cp := *(*C.int64_t)(unsafe.Pointer(&p)) + C.atg_mvlgamma(ptr, self, cp) +} +func AtgMvlgamma_(ptr *Ctensor, self Ctensor, p int64) { + cp := *(*C.int64_t)(unsafe.Pointer(&p)) + C.atg_mvlgamma_(ptr, self, cp) +} +func AtgNarrow(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cstart := *(*C.int64_t)(unsafe.Pointer(&start)) + clength := *(*C.int64_t)(unsafe.Pointer(&length)) + C.atg_narrow(ptr, self, cdim, cstart, clength) +} +func AtgNarrow1(ptr *Ctensor, self Ctensor, dim int64, start Ctensor, length int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + clength := *(*C.int64_t)(unsafe.Pointer(&length)) + C.atg_narrow1(ptr, self, cdim, start, clength) +} +func AtgNarrowCopy(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cstart := *(*C.int64_t)(unsafe.Pointer(&start)) + clength := *(*C.int64_t)(unsafe.Pointer(&length)) + C.atg_narrow_copy(ptr, self, cdim, cstart, clength) +} +func AtgNativeBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + C.atg_native_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) +} +func AtgNativeBatchNormOut(ptr *Ctensor, out Ctensor, saveMean Ctensor, saveInvstd Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + C.atg_native_batch_norm_out(ptr, out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) +} +func AtgNativeLayerNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, m int64, n int64, eps float64) { + cm := *(*C.int64_t)(unsafe.Pointer(&m)) + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + C.atg_native_layer_norm(ptr, input, weight, bias, cm, cn, ceps) +} +func AtgNativeNorm(ptr *Ctensor, self Ctensor) { + 
C.atg_native_norm(ptr, self) +} +func AtgNe(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_ne(ptr, self, other) +} +func AtgNe1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_ne1(ptr, self, other) +} +func AtgNe_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_ne_(ptr, self, other) +} +func AtgNe1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_ne_1(ptr, self, other) +} +func AtgNeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_ne_out(ptr, out, self, other) +} +func AtgNeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_ne_out1(ptr, out, self, other) +} +func AtgNeg(ptr *Ctensor, self Ctensor) { + C.atg_neg(ptr, self) +} +func AtgNeg_(ptr *Ctensor, self Ctensor) { + C.atg_neg_(ptr, self) +} +func AtgNegOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_neg_out(ptr, out, self) +} +func AtgNewEmpty(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_new_empty(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgNewFull(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_new_full(ptr, self, csizeDataPtr, csizeLen, fillValue, coptionsKind, coptionsDevice) +} +func AtgNewZeros(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_new_zeros(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgNllLoss(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + C.atg_nll_loss(ptr, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLoss2d(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + C.atg_nll_loss2d(ptr, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLoss2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + C.atg_nll_loss2d_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLoss2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + 
C.atg_nll_loss2d_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLoss2dOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + C.atg_nll_loss2d_out(ptr, out, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + C.atg_nll_loss_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + C.atg_nll_loss_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) + C.atg_nll_loss_out(ptr, out, self, target, weight, creduction, cignoreIndex) +} +func AtgNonzero(ptr *Ctensor, self Ctensor) { + C.atg_nonzero(ptr, self) } -func AtgNonzeroOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_nonzero_out(ptr, out, self) -} -func AtgNorm(ptr *Ctensor, self Ctensor){ -C.atg_norm(ptr, self) -} -func AtgNorm1(ptr *Ctensor, self Ctensor, p Cscalar, dtype int32){ -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_norm1(ptr, self, p , cdtype) -} -func AtgNorm2(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_norm2(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNorm3(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_norm3(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgNormExceptDim(ptr *Ctensor, v Ctensor, pow int64, dim int64){ -cpow := *(*C.int64_t)(unsafe.Pointer(&pow)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_norm_except_dim(ptr, v, cpow, cdim) -} -func AtgNormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_norm_out(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := 
*(*C.int)(unsafe.Pointer(&dtype)) -C.atg_norm_out1(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){ -cmean := *(*C.double)(unsafe.Pointer(&mean)) -cstd := *(*C.double)(unsafe.Pointer(&std)) -C.atg_normal_(ptr, self, cmean, cstd) -} -func AtgNormalOut(ptr *Ctensor, out Ctensor, mean Ctensor, std float64){ -cstd := *(*C.double)(unsafe.Pointer(&std)) -C.atg_normal_out(ptr, out, mean, cstd) -} -func AtgNormalOut1(ptr *Ctensor, out Ctensor, mean float64, std Ctensor){ -cmean := *(*C.double)(unsafe.Pointer(&mean)) -C.atg_normal_out1(ptr, out, cmean, std) -} -func AtgNormalOut2(ptr *Ctensor, out Ctensor, mean Ctensor, std Ctensor){ -C.atg_normal_out2(ptr, out, mean, std) -} -func AtgNormalOut3(ptr *Ctensor, out Ctensor, mean float64, std float64, sizeData []int64, sizeLen int){ -cmean := *(*C.double)(unsafe.Pointer(&mean)) -cstd := *(*C.double)(unsafe.Pointer(&std)) -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_normal_out3(ptr, out, cmean, cstd, csizeDataPtr, csizeLen) -} -func AtgNuclearNorm(ptr *Ctensor, self Ctensor, keepdim int32){ -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_nuclear_norm(ptr, self, ckeepdim) -} -func AtgNuclearNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_nuclear_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNuclearNormOut(ptr *Ctensor, out Ctensor, self Ctensor, keepdim int32){ -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_nuclear_norm_out(ptr, out, self, ckeepdim) -} -func AtgNuclearNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_nuclear_norm_out1(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNumpyT(ptr *Ctensor, self Ctensor){ -C.atg_numpy_t(ptr, self) -} -func AtgOneHot(ptr *Ctensor, self Ctensor, numClasses int64){ -cnumClasses := *(*C.int64_t)(unsafe.Pointer(&numClasses)) -C.atg_one_hot(ptr, self, cnumClasses) -} -func AtgOnes(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_ones(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgOnesLike(ptr *Ctensor, self Ctensor){ -C.atg_ones_like(ptr, self) -} -func AtgOnesOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_ones_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgOrgqr(ptr *Ctensor, self Ctensor, input2 Ctensor){ -C.atg_orgqr(ptr, self, input2) -} -func AtgOrgqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor){ -C.atg_orgqr_out(ptr, out, self, input2) -} -func AtgOrmqr(ptr *Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){ -cleft := *(*C.int)(unsafe.Pointer(&left)) -ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) -C.atg_ormqr(ptr, self, input2, input3, cleft, 
ctranspose) -} -func AtgOrmqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){ -cleft := *(*C.int)(unsafe.Pointer(&left)) -ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) -C.atg_ormqr_out(ptr, out, self, input2, input3, cleft, ctranspose) -} -func AtgPairwiseDistance(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, eps float64, keepdim int32){ -cp := *(*C.double)(unsafe.Pointer(&p)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_pairwise_distance(ptr, x1, x2, cp, ceps, ckeepdim) -} -func AtgPdist(ptr *Ctensor, self Ctensor, p float64){ -cp := *(*C.double)(unsafe.Pointer(&p)) -C.atg_pdist(ptr, self, cp) -} -func AtgPermute(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){ -cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) -cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) -C.atg_permute(ptr, self, cdimsDataPtr, cdimsLen) -} -func AtgPinMemory(ptr *Ctensor, self Ctensor){ -C.atg_pin_memory(ptr, self) -} -func AtgPinverse(ptr *Ctensor, self Ctensor, rcond float64){ -crcond := *(*C.double)(unsafe.Pointer(&rcond)) -C.atg_pinverse(ptr, self, crcond) -} -func AtgPixelShuffle(ptr *Ctensor, self Ctensor, upscaleFactor int64){ -cupscaleFactor := *(*C.int64_t)(unsafe.Pointer(&upscaleFactor)) -C.atg_pixel_shuffle(ptr, self, cupscaleFactor) -} -func AtgPoisson(ptr *Ctensor, self Ctensor){ -C.atg_poisson(ptr, self) -} -func AtgPoissonNllLoss(ptr *Ctensor, input Ctensor, target Ctensor, logInput int32, full int32, eps float64, reduction int64){ -clogInput := *(*C.int)(unsafe.Pointer(&logInput)) -cfull := *(*C.int)(unsafe.Pointer(&full)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_poisson_nll_loss(ptr, input, target, clogInput, cfull, ceps, creduction) -} -func AtgPolygamma(ptr *Ctensor, n int64, self Ctensor){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -C.atg_polygamma(ptr, cn, self) -} -func AtgPolygamma_(ptr *Ctensor, self Ctensor, n int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -C.atg_polygamma_(ptr, self, cn) -} -func AtgPolygammaOut(ptr *Ctensor, out Ctensor, n int64, self Ctensor){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -C.atg_polygamma_out(ptr, out, cn, self) -} -func AtgPow(ptr *Ctensor, self Ctensor, exponent Cscalar){ -C.atg_pow(ptr, self, exponent ) -} -func AtgPow1(ptr *Ctensor, self Ctensor, exponent Ctensor){ -C.atg_pow1(ptr, self, exponent) -} -func AtgPow2(ptr *Ctensor, selfScalar Cscalar, exponent Ctensor){ -C.atg_pow2(ptr, selfScalar , exponent) -} -func AtgPow_(ptr *Ctensor, self Ctensor, exponent Cscalar){ -C.atg_pow_(ptr, self, exponent ) -} -func AtgPow1_(ptr *Ctensor, self Ctensor, exponent Ctensor){ -C.atg_pow_1(ptr, self, exponent) -} -func AtgPowOut(ptr *Ctensor, out Ctensor, self Ctensor, exponent Cscalar){ -C.atg_pow_out(ptr, out, self, exponent ) -} -func AtgPowOut1(ptr *Ctensor, out Ctensor, self Ctensor, exponent Ctensor){ -C.atg_pow_out1(ptr, out, self, exponent) -} -func AtgPowOut2(ptr *Ctensor, out Ctensor, selfScalar Cscalar, exponent Ctensor){ -C.atg_pow_out2(ptr, out, selfScalar , exponent) -} -func AtgPrelu(ptr *Ctensor, self Ctensor, weight Ctensor){ -C.atg_prelu(ptr, self, weight) -} -func AtgPreluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor){ -C.atg_prelu_backward(ptr, gradOutput, self, weight) -} -func AtgProd(ptr *Ctensor, self Ctensor, dtype int32){ -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_prod(ptr, self, cdtype) -} -func 
AtgProd1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_prod1(ptr, self, cdim, ckeepdim, cdtype) -} -func AtgProdOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_prod_out(ptr, out, self, cdim, ckeepdim, cdtype) -} -func AtgPut_(ptr *Ctensor, self Ctensor, index Ctensor, source Ctensor, accumulate int32){ -caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) -C.atg_put_(ptr, self, index, source, caccumulate) -} -func AtgQPerChannelScales(ptr *Ctensor, self Ctensor){ -C.atg_q_per_channel_scales(ptr, self) -} -func AtgQPerChannelZeroPoints(ptr *Ctensor, self Ctensor){ -C.atg_q_per_channel_zero_points(ptr, self) -} -func AtgQr(ptr *Ctensor, self Ctensor, some int32){ -csome := *(*C.int)(unsafe.Pointer(&some)) -C.atg_qr(ptr, self, csome) -} -func AtgQrOut(ptr *Ctensor, q Ctensor, r Ctensor, self Ctensor, some int32){ -csome := *(*C.int)(unsafe.Pointer(&some)) -C.atg_qr_out(ptr, q, r, self, csome) -} -func AtgQuantizePerChannel(ptr *Ctensor, self Ctensor, scales Ctensor, zeroPoints Ctensor, axis int64, dtype int32){ -caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_quantize_per_channel(ptr, self, scales, zeroPoints, caxis, cdtype) -} -func AtgQuantizePerTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, dtype int32){ -cscale := *(*C.double)(unsafe.Pointer(&scale)) -czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_quantize_per_tensor(ptr, self, cscale, czeroPoint, cdtype) -} -func AtgQuantizedBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, vari Ctensor, eps float64, outputScale float64, outputZeroPoint int64){ -ceps := *(*C.double)(unsafe.Pointer(&eps)) -coutputScale := *(*C.double)(unsafe.Pointer(&outputScale)) -coutputZeroPoint := *(*C.int64_t)(unsafe.Pointer(&outputZeroPoint)) -C.atg_quantized_batch_norm(ptr, input, weight, bias, mean, vari, ceps, coutputScale, coutputZeroPoint) -} -func AtgQuantizedGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -C.atg_quantized_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgQuantizedGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){ -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := 
*(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -C.atg_quantized_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgQuantizedGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ -C.atg_quantized_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) -} -func AtgQuantizedLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32, dtype int32, useDynamic int32){ -chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) -chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) -C.atg_quantized_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst, cdtype, cuseDynamic) -} -func AtgQuantizedLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, dtype int32, useDynamic int32){ -chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) -chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) -C.atg_quantized_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cdtype, cuseDynamic) -} -func AtgQuantizedLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ -chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) -chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) -C.atg_quantized_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) -} -func AtgQuantizedMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, 
dilationLen int, ceilMode int32){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) -C.atg_quantized_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgQuantizedRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ -C.atg_quantized_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) -} -func AtgQuantizedRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ -C.atg_quantized_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) -} -func AtgRand(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_rand(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandLike(ptr *Ctensor, self Ctensor){ -C.atg_rand_like(ptr, self) -} -func AtgRandOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_rand_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgRandint(ptr *Ctensor, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -chigh := *(*C.int64_t)(unsafe.Pointer(&high)) -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_randint(ptr, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandint1(ptr *Ctensor, low int64, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -clow := *(*C.int64_t)(unsafe.Pointer(&low)) -chigh := *(*C.int64_t)(unsafe.Pointer(&high)) -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_randint1(ptr, clow, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandintLike(ptr *Ctensor, self Ctensor, high int64){ -chigh := *(*C.int64_t)(unsafe.Pointer(&high)) -C.atg_randint_like(ptr, self, chigh) -} -func AtgRandintLike1(ptr 
*Ctensor, self Ctensor, low int64, high int64){ -clow := *(*C.int64_t)(unsafe.Pointer(&low)) -chigh := *(*C.int64_t)(unsafe.Pointer(&high)) -C.atg_randint_like1(ptr, self, clow, chigh) -} -func AtgRandintOut(ptr *Ctensor, out Ctensor, high int64, sizeData []int64, sizeLen int){ -chigh := *(*C.int64_t)(unsafe.Pointer(&high)) -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_randint_out(ptr, out, chigh, csizeDataPtr, csizeLen) -} -func AtgRandintOut1(ptr *Ctensor, out Ctensor, low int64, high int64, sizeData []int64, sizeLen int){ -clow := *(*C.int64_t)(unsafe.Pointer(&low)) -chigh := *(*C.int64_t)(unsafe.Pointer(&high)) -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_randint_out1(ptr, out, clow, chigh, csizeDataPtr, csizeLen) -} -func AtgRandn(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_randn(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandnLike(ptr *Ctensor, self Ctensor){ -C.atg_randn_like(ptr, self) -} -func AtgRandnOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_randn_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgRandom_(ptr *Ctensor, self Ctensor){ -C.atg_random_(ptr, self) -} -func AtgRandom1_(ptr *Ctensor, self Ctensor, to int64){ -cto := *(*C.int64_t)(unsafe.Pointer(&to)) -C.atg_random_1(ptr, self, cto) -} -func AtgRandom2(ptr *Ctensor, self Ctensor, from int64, to int64){ -cfrom := *(*C.int64_t)(unsafe.Pointer(&from)) -cto := *(*C.int64_t)(unsafe.Pointer(&to)) -C.atg_random_2(ptr, self, cfrom, cto) -} -func AtgRandperm(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_randperm(ptr, cn, coptionsKind, coptionsDevice) -} -func AtgRandpermOut(ptr *Ctensor, out Ctensor, n int64){ -cn := *(*C.int64_t)(unsafe.Pointer(&n)) -C.atg_randperm_out(ptr, out, cn) -} -func AtgRange(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_range(ptr, start , end , coptionsKind, coptionsDevice) -} -func AtgRange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_range1(ptr, start , end , coptionsKind, coptionsDevice) -} -func AtgRangeOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){ -C.atg_range_out(ptr, out, start , end ) -} -func AtgReal(ptr *Ctensor, self Ctensor){ -C.atg_real(ptr, self) -} -func AtgReciprocal(ptr *Ctensor, self Ctensor){ -C.atg_reciprocal(ptr, self) -} -func AtgReciprocal_(ptr *Ctensor, self Ctensor){ -C.atg_reciprocal_(ptr, self) -} -func AtgReciprocalOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_reciprocal_out(ptr, out, self) -} -func AtgReflectionPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ 
-cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_reflection_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgRelu(ptr *Ctensor, self Ctensor){ -C.atg_relu(ptr, self) -} -func AtgRelu_(ptr *Ctensor, self Ctensor){ -C.atg_relu_(ptr, self) -} -func AtgRemainder(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_remainder(ptr, self, other ) -} -func AtgRemainder1(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_remainder1(ptr, self, other) -} -func AtgRemainder_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_remainder_(ptr, self, other ) -} -func AtgRemainder1_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_remainder_1(ptr, self, other) -} -func AtgRemainderOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ -C.atg_remainder_out(ptr, out, self, other ) -} -func AtgRemainderOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_remainder_out1(ptr, out, self, other) -} -func AtgRenorm(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_renorm(ptr, self, p , cdim, maxnorm ) -} -func AtgRenorm_(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ -cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_renorm_(ptr, self, p , cdim, maxnorm ) -} -func AtgRenormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_renorm_out(ptr, out, self, p , cdim, maxnorm ) -} -func AtgRepeat(ptr *Ctensor, self Ctensor, repeatsData []int64, repeatsLen int){ -crepeatsDataPtr := (*C.int64_t)(unsafe.Pointer(&repeatsData[0])) -crepeatsLen := *(*C.int)(unsafe.Pointer(&repeatsLen)) -C.atg_repeat(ptr, self, crepeatsDataPtr, crepeatsLen) -} -func AtgRepeatInterleave(ptr *Ctensor, repeats Ctensor){ -C.atg_repeat_interleave(ptr, repeats) -} -func AtgRepeatInterleave1(ptr *Ctensor, self Ctensor, repeats Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_repeat_interleave1(ptr, self, repeats, cdim) -} -func AtgRepeatInterleave2(ptr *Ctensor, self Ctensor, repeats int64, dim int64){ -crepeats := *(*C.int64_t)(unsafe.Pointer(&repeats)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_repeat_interleave2(ptr, self, crepeats, cdim) -} -func AtgReplicationPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := 
*(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad3d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad3d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad3d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_replication_pad3d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgRequiresGrad_(ptr *Ctensor, self Ctensor, requiresGrad int32){ -crequiresGrad := *(*C.int)(unsafe.Pointer(&requiresGrad)) -C.atg_requires_grad_(ptr, self, crequiresGrad) -} -func AtgReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){ -cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) -cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) -C.atg_reshape(ptr, self, cshapeDataPtr, cshapeLen) -} -func AtgReshapeAs(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_reshape_as(ptr, self, other) -} -func AtgResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_resize_(ptr, self, csizeDataPtr, csizeLen) -} -func AtgResizeAs_(ptr *Ctensor, self Ctensor, theTemplate Ctensor){ -C.atg_resize_as_(ptr, self, theTemplate) -} -func AtgRfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32){ -csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) -cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) -conesided := *(*C.int)(unsafe.Pointer(&onesided)) -C.atg_rfft(ptr, self, csignalNdim, cnormalized, conesided) -} -func AtgRnnRelu(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -C.atg_rnn_relu(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgRnnRelu1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, 
bidirectional int32){ -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -C.atg_rnn_relu1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){ -C.atg_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh) -} -func AtgRnnTanh(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) -C.atg_rnn_tanh(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgRnnTanh1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){ -cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) -cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) -chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) -cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) -cdropout := *(*C.double)(unsafe.Pointer(&dropout)) -ctrain := *(*C.int)(unsafe.Pointer(&train)) -cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) -C.atg_rnn_tanh1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){ -C.atg_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh) -} -func AtgRoll(ptr *Ctensor, self Ctensor, shiftsData []int64, shiftsLen int, dimsData []int64, dimsLen int){ -cshiftsDataPtr := (*C.int64_t)(unsafe.Pointer(&shiftsData[0])) -cshiftsLen := *(*C.int)(unsafe.Pointer(&shiftsLen)) -cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) -cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) -C.atg_roll(ptr, self, cshiftsDataPtr, cshiftsLen, cdimsDataPtr, cdimsLen) -} -func AtgRot90(ptr *Ctensor, self Ctensor, k int64, dimsData []int64, dimsLen int){ -ck := *(*C.int64_t)(unsafe.Pointer(&k)) -cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) -cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) -C.atg_rot90(ptr, self, ck, cdimsDataPtr, cdimsLen) -} -func AtgRound(ptr *Ctensor, self Ctensor){ -C.atg_round(ptr, self) -} -func AtgRound_(ptr *Ctensor, self Ctensor){ -C.atg_round_(ptr, self) -} -func AtgRoundOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_round_out(ptr, out, self) -} -func AtgRrelu(ptr *Ctensor, self Ctensor, training int32){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -C.atg_rrelu(ptr, self, ctraining) -} -func AtgRrelu_(ptr *Ctensor, self Ctensor, training int32){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) 
-C.atg_rrelu_(ptr, self, ctraining) -} -func AtgRreluWithNoise(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -C.atg_rrelu_with_noise(ptr, self, noise, ctraining) -} -func AtgRreluWithNoise_(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -C.atg_rrelu_with_noise_(ptr, self, noise, ctraining) -} -func AtgRreluWithNoiseBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, noise Ctensor, lower Cscalar, upper Cscalar, training int32, selfIsResult int32){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) -C.atg_rrelu_with_noise_backward(ptr, gradOutput, self, noise, lower , upper , ctraining, cselfIsResult) -} -func AtgRreluWithNoiseOut(ptr *Ctensor, out Ctensor, self Ctensor, noise Ctensor, training int32){ -ctraining := *(*C.int)(unsafe.Pointer(&training)) -C.atg_rrelu_with_noise_out(ptr, out, self, noise, ctraining) -} -func AtgRsqrt(ptr *Ctensor, self Ctensor){ -C.atg_rsqrt(ptr, self) -} -func AtgRsqrt_(ptr *Ctensor, self Ctensor){ -C.atg_rsqrt_(ptr, self) -} -func AtgRsqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_rsqrt_out(ptr, out, self) -} -func AtgRsub(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_rsub(ptr, self, other) -} -func AtgRsub1(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_rsub1(ptr, self, other ) +func AtgNonzeroOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_nonzero_out(ptr, out, self) +} +func AtgNorm(ptr *Ctensor, self Ctensor) { + C.atg_norm(ptr, self) +} +func AtgNorm1(ptr *Ctensor, self Ctensor, p Cscalar, dtype int32) { + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_norm1(ptr, self, p, cdtype) +} +func AtgNorm2(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_norm2(ptr, self, p, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNorm3(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_norm3(ptr, self, p, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNormExceptDim(ptr *Ctensor, v Ctensor, pow int64, dim int64) { + cpow := *(*C.int64_t)(unsafe.Pointer(&pow)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_norm_except_dim(ptr, v, cpow, cdim) +} +func AtgNormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_norm_out(ptr, out, self, p, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_norm_out1(ptr, out, self, p, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64) { + cmean := 
*(*C.double)(unsafe.Pointer(&mean)) + cstd := *(*C.double)(unsafe.Pointer(&std)) + C.atg_normal_(ptr, self, cmean, cstd) +} +func AtgNormalOut(ptr *Ctensor, out Ctensor, mean Ctensor, std float64) { + cstd := *(*C.double)(unsafe.Pointer(&std)) + C.atg_normal_out(ptr, out, mean, cstd) +} +func AtgNormalOut1(ptr *Ctensor, out Ctensor, mean float64, std Ctensor) { + cmean := *(*C.double)(unsafe.Pointer(&mean)) + C.atg_normal_out1(ptr, out, cmean, std) +} +func AtgNormalOut2(ptr *Ctensor, out Ctensor, mean Ctensor, std Ctensor) { + C.atg_normal_out2(ptr, out, mean, std) +} +func AtgNormalOut3(ptr *Ctensor, out Ctensor, mean float64, std float64, sizeData []int64, sizeLen int) { + cmean := *(*C.double)(unsafe.Pointer(&mean)) + cstd := *(*C.double)(unsafe.Pointer(&std)) + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_normal_out3(ptr, out, cmean, cstd, csizeDataPtr, csizeLen) +} +func AtgNuclearNorm(ptr *Ctensor, self Ctensor, keepdim int32) { + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_nuclear_norm(ptr, self, ckeepdim) +} +func AtgNuclearNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_nuclear_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNuclearNormOut(ptr *Ctensor, out Ctensor, self Ctensor, keepdim int32) { + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_nuclear_norm_out(ptr, out, self, ckeepdim) +} +func AtgNuclearNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_nuclear_norm_out1(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNumpyT(ptr *Ctensor, self Ctensor) { + C.atg_numpy_t(ptr, self) +} +func AtgOneHot(ptr *Ctensor, self Ctensor, numClasses int64) { + cnumClasses := *(*C.int64_t)(unsafe.Pointer(&numClasses)) + C.atg_one_hot(ptr, self, cnumClasses) +} +func AtgOnes(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_ones(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgOnesLike(ptr *Ctensor, self Ctensor) { + C.atg_ones_like(ptr, self) +} +func AtgOnesOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_ones_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgOrgqr(ptr *Ctensor, self Ctensor, input2 Ctensor) { + C.atg_orgqr(ptr, self, input2) +} +func AtgOrgqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor) { + C.atg_orgqr_out(ptr, out, self, input2) +} +func AtgOrmqr(ptr *Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32) { + cleft := *(*C.int)(unsafe.Pointer(&left)) + ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) + C.atg_ormqr(ptr, self, input2, input3, cleft, ctranspose) +} +func AtgOrmqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32) { + 
cleft := *(*C.int)(unsafe.Pointer(&left)) + ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) + C.atg_ormqr_out(ptr, out, self, input2, input3, cleft, ctranspose) +} +func AtgPairwiseDistance(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, eps float64, keepdim int32) { + cp := *(*C.double)(unsafe.Pointer(&p)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_pairwise_distance(ptr, x1, x2, cp, ceps, ckeepdim) +} +func AtgPdist(ptr *Ctensor, self Ctensor, p float64) { + cp := *(*C.double)(unsafe.Pointer(&p)) + C.atg_pdist(ptr, self, cp) +} +func AtgPermute(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int) { + cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) + cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) + C.atg_permute(ptr, self, cdimsDataPtr, cdimsLen) +} +func AtgPinMemory(ptr *Ctensor, self Ctensor) { + C.atg_pin_memory(ptr, self) +} +func AtgPinverse(ptr *Ctensor, self Ctensor, rcond float64) { + crcond := *(*C.double)(unsafe.Pointer(&rcond)) + C.atg_pinverse(ptr, self, crcond) +} +func AtgPixelShuffle(ptr *Ctensor, self Ctensor, upscaleFactor int64) { + cupscaleFactor := *(*C.int64_t)(unsafe.Pointer(&upscaleFactor)) + C.atg_pixel_shuffle(ptr, self, cupscaleFactor) +} +func AtgPoisson(ptr *Ctensor, self Ctensor) { + C.atg_poisson(ptr, self) +} +func AtgPoissonNllLoss(ptr *Ctensor, input Ctensor, target Ctensor, logInput int32, full int32, eps float64, reduction int64) { + clogInput := *(*C.int)(unsafe.Pointer(&logInput)) + cfull := *(*C.int)(unsafe.Pointer(&full)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_poisson_nll_loss(ptr, input, target, clogInput, cfull, ceps, creduction) +} +func AtgPolygamma(ptr *Ctensor, n int64, self Ctensor) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + C.atg_polygamma(ptr, cn, self) +} +func AtgPolygamma_(ptr *Ctensor, self Ctensor, n int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + C.atg_polygamma_(ptr, self, cn) +} +func AtgPolygammaOut(ptr *Ctensor, out Ctensor, n int64, self Ctensor) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + C.atg_polygamma_out(ptr, out, cn, self) +} +func AtgPow(ptr *Ctensor, self Ctensor, exponent Cscalar) { + C.atg_pow(ptr, self, exponent) +} +func AtgPow1(ptr *Ctensor, self Ctensor, exponent Ctensor) { + C.atg_pow1(ptr, self, exponent) +} +func AtgPow2(ptr *Ctensor, selfScalar Cscalar, exponent Ctensor) { + C.atg_pow2(ptr, selfScalar, exponent) +} +func AtgPow_(ptr *Ctensor, self Ctensor, exponent Cscalar) { + C.atg_pow_(ptr, self, exponent) +} +func AtgPow1_(ptr *Ctensor, self Ctensor, exponent Ctensor) { + C.atg_pow_1(ptr, self, exponent) +} +func AtgPowOut(ptr *Ctensor, out Ctensor, self Ctensor, exponent Cscalar) { + C.atg_pow_out(ptr, out, self, exponent) +} +func AtgPowOut1(ptr *Ctensor, out Ctensor, self Ctensor, exponent Ctensor) { + C.atg_pow_out1(ptr, out, self, exponent) +} +func AtgPowOut2(ptr *Ctensor, out Ctensor, selfScalar Cscalar, exponent Ctensor) { + C.atg_pow_out2(ptr, out, selfScalar, exponent) +} +func AtgPrelu(ptr *Ctensor, self Ctensor, weight Ctensor) { + C.atg_prelu(ptr, self, weight) +} +func AtgPreluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor) { + C.atg_prelu_backward(ptr, gradOutput, self, weight) +} +func AtgProd(ptr *Ctensor, self Ctensor, dtype int32) { + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_prod(ptr, self, cdtype) +} +func AtgProd1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32) { + cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_prod1(ptr, self, cdim, ckeepdim, cdtype) +} +func AtgProdOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_prod_out(ptr, out, self, cdim, ckeepdim, cdtype) +} +func AtgPut_(ptr *Ctensor, self Ctensor, index Ctensor, source Ctensor, accumulate int32) { + caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) + C.atg_put_(ptr, self, index, source, caccumulate) +} +func AtgQPerChannelScales(ptr *Ctensor, self Ctensor) { + C.atg_q_per_channel_scales(ptr, self) +} +func AtgQPerChannelZeroPoints(ptr *Ctensor, self Ctensor) { + C.atg_q_per_channel_zero_points(ptr, self) +} +func AtgQr(ptr *Ctensor, self Ctensor, some int32) { + csome := *(*C.int)(unsafe.Pointer(&some)) + C.atg_qr(ptr, self, csome) +} +func AtgQrOut(ptr *Ctensor, q Ctensor, r Ctensor, self Ctensor, some int32) { + csome := *(*C.int)(unsafe.Pointer(&some)) + C.atg_qr_out(ptr, q, r, self, csome) +} +func AtgQuantizePerChannel(ptr *Ctensor, self Ctensor, scales Ctensor, zeroPoints Ctensor, axis int64, dtype int32) { + caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_quantize_per_channel(ptr, self, scales, zeroPoints, caxis, cdtype) +} +func AtgQuantizePerTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, dtype int32) { + cscale := *(*C.double)(unsafe.Pointer(&scale)) + czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_quantize_per_tensor(ptr, self, cscale, czeroPoint, cdtype) +} +func AtgQuantizedBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, vari Ctensor, eps float64, outputScale float64, outputZeroPoint int64) { + ceps := *(*C.double)(unsafe.Pointer(&eps)) + coutputScale := *(*C.double)(unsafe.Pointer(&outputScale)) + coutputZeroPoint := *(*C.int64_t)(unsafe.Pointer(&outputZeroPoint)) + C.atg_quantized_batch_norm(ptr, input, weight, bias, mean, vari, ceps, coutputScale, coutputZeroPoint) +} +func AtgQuantizedGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + C.atg_quantized_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) +} +func AtgQuantizedGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := 
*(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + C.atg_quantized_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) +} +func AtgQuantizedGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { + C.atg_quantized_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) +} +func AtgQuantizedLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32, dtype int32, useDynamic int32) { + chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) + chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) + C.atg_quantized_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst, cdtype, cuseDynamic) +} +func AtgQuantizedLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, dtype int32, useDynamic int32) { + chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) + chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) + C.atg_quantized_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cdtype, cuseDynamic) +} +func AtgQuantizedLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { + chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) + chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) + C.atg_quantized_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) +} +func AtgQuantizedMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode 
int32) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) + C.atg_quantized_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgQuantizedRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { + C.atg_quantized_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) +} +func AtgQuantizedRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { + C.atg_quantized_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) +} +func AtgRand(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_rand(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandLike(ptr *Ctensor, self Ctensor) { + C.atg_rand_like(ptr, self) +} +func AtgRandOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_rand_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgRandint(ptr *Ctensor, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + chigh := *(*C.int64_t)(unsafe.Pointer(&high)) + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_randint(ptr, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandint1(ptr *Ctensor, low int64, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + clow := *(*C.int64_t)(unsafe.Pointer(&low)) + chigh := *(*C.int64_t)(unsafe.Pointer(&high)) + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_randint1(ptr, clow, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandintLike(ptr *Ctensor, self Ctensor, high int64) { + chigh := *(*C.int64_t)(unsafe.Pointer(&high)) + C.atg_randint_like(ptr, self, chigh) +} +func 
AtgRandintLike1(ptr *Ctensor, self Ctensor, low int64, high int64) { + clow := *(*C.int64_t)(unsafe.Pointer(&low)) + chigh := *(*C.int64_t)(unsafe.Pointer(&high)) + C.atg_randint_like1(ptr, self, clow, chigh) +} +func AtgRandintOut(ptr *Ctensor, out Ctensor, high int64, sizeData []int64, sizeLen int) { + chigh := *(*C.int64_t)(unsafe.Pointer(&high)) + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_randint_out(ptr, out, chigh, csizeDataPtr, csizeLen) +} +func AtgRandintOut1(ptr *Ctensor, out Ctensor, low int64, high int64, sizeData []int64, sizeLen int) { + clow := *(*C.int64_t)(unsafe.Pointer(&low)) + chigh := *(*C.int64_t)(unsafe.Pointer(&high)) + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_randint_out1(ptr, out, clow, chigh, csizeDataPtr, csizeLen) +} +func AtgRandn(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_randn(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandnLike(ptr *Ctensor, self Ctensor) { + C.atg_randn_like(ptr, self) +} +func AtgRandnOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_randn_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgRandom_(ptr *Ctensor, self Ctensor) { + C.atg_random_(ptr, self) +} +func AtgRandom1_(ptr *Ctensor, self Ctensor, to int64) { + cto := *(*C.int64_t)(unsafe.Pointer(&to)) + C.atg_random_1(ptr, self, cto) +} +func AtgRandom2(ptr *Ctensor, self Ctensor, from int64, to int64) { + cfrom := *(*C.int64_t)(unsafe.Pointer(&from)) + cto := *(*C.int64_t)(unsafe.Pointer(&to)) + C.atg_random_2(ptr, self, cfrom, cto) +} +func AtgRandperm(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_randperm(ptr, cn, coptionsKind, coptionsDevice) +} +func AtgRandpermOut(ptr *Ctensor, out Ctensor, n int64) { + cn := *(*C.int64_t)(unsafe.Pointer(&n)) + C.atg_randperm_out(ptr, out, cn) +} +func AtgRange(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32) { + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_range(ptr, start, end, coptionsKind, coptionsDevice) +} +func AtgRange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32) { + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_range1(ptr, start, end, coptionsKind, coptionsDevice) +} +func AtgRangeOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar) { + C.atg_range_out(ptr, out, start, end) +} +func AtgReal(ptr *Ctensor, self Ctensor) { + C.atg_real(ptr, self) +} +func AtgReciprocal(ptr *Ctensor, self Ctensor) { + C.atg_reciprocal(ptr, self) +} +func AtgReciprocal_(ptr *Ctensor, self Ctensor) { + C.atg_reciprocal_(ptr, self) +} +func AtgReciprocalOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_reciprocal_out(ptr, out, self) +} +func 
AtgReflectionPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_reflection_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgRelu(ptr *Ctensor, self Ctensor) { + C.atg_relu(ptr, self) +} +func AtgRelu_(ptr *Ctensor, self Ctensor) { + C.atg_relu_(ptr, self) +} +func AtgRemainder(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_remainder(ptr, self, other) +} +func AtgRemainder1(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_remainder1(ptr, self, other) +} +func AtgRemainder_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_remainder_(ptr, self, other) +} +func AtgRemainder1_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_remainder_1(ptr, self, other) +} +func AtgRemainderOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { + C.atg_remainder_out(ptr, out, self, other) +} +func AtgRemainderOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_remainder_out1(ptr, out, self, other) +} +func AtgRenorm(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + 
C.atg_renorm(ptr, self, p, cdim, maxnorm) +} +func AtgRenorm_(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_renorm_(ptr, self, p, cdim, maxnorm) +} +func AtgRenormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_renorm_out(ptr, out, self, p, cdim, maxnorm) +} +func AtgRepeat(ptr *Ctensor, self Ctensor, repeatsData []int64, repeatsLen int) { + crepeatsDataPtr := (*C.int64_t)(unsafe.Pointer(&repeatsData[0])) + crepeatsLen := *(*C.int)(unsafe.Pointer(&repeatsLen)) + C.atg_repeat(ptr, self, crepeatsDataPtr, crepeatsLen) +} +func AtgRepeatInterleave(ptr *Ctensor, repeats Ctensor) { + C.atg_repeat_interleave(ptr, repeats) +} +func AtgRepeatInterleave1(ptr *Ctensor, self Ctensor, repeats Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_repeat_interleave1(ptr, self, repeats, cdim) +} +func AtgRepeatInterleave2(ptr *Ctensor, self Ctensor, repeats int64, dim int64) { + crepeats := *(*C.int64_t)(unsafe.Pointer(&repeats)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_repeat_interleave2(ptr, self, crepeats, cdim) +} +func AtgReplicationPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dOut(ptr 
*Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad3d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad3d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad3d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_replication_pad3d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgRequiresGrad_(ptr *Ctensor, self Ctensor, requiresGrad int32) { + crequiresGrad := *(*C.int)(unsafe.Pointer(&requiresGrad)) + C.atg_requires_grad_(ptr, self, crequiresGrad) +} +func AtgReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int) { + cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) + cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) + C.atg_reshape(ptr, self, cshapeDataPtr, cshapeLen) +} +func AtgReshapeAs(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_reshape_as(ptr, self, other) +} +func AtgResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_resize_(ptr, self, csizeDataPtr, csizeLen) +} +func AtgResizeAs_(ptr *Ctensor, self Ctensor, theTemplate Ctensor) { + C.atg_resize_as_(ptr, self, theTemplate) +} +func AtgRfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32) { + csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) + cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) + conesided := *(*C.int)(unsafe.Pointer(&onesided)) + C.atg_rfft(ptr, self, csignalNdim, cnormalized, conesided) +} +func AtgRnnRelu(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + C.atg_rnn_relu(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, 
cbidirectional, cbatchFirst) +} +func AtgRnnRelu1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + C.atg_rnn_relu1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) +} +func AtgRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) { + C.atg_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh) +} +func AtgRnnTanh(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) + C.atg_rnn_tanh(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) +} +func AtgRnnTanh1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { + cparamsDataPtr := (*Ctensor)(unsafe.Pointer(¶msData[0])) + cparamsLen := *(*C.int)(unsafe.Pointer(¶msLen)) + chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) + cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) + cdropout := *(*C.double)(unsafe.Pointer(&dropout)) + ctrain := *(*C.int)(unsafe.Pointer(&train)) + cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) + C.atg_rnn_tanh1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) +} +func AtgRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) { + C.atg_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh) +} +func AtgRoll(ptr *Ctensor, self Ctensor, shiftsData []int64, shiftsLen int, dimsData []int64, dimsLen int) { + cshiftsDataPtr := (*C.int64_t)(unsafe.Pointer(&shiftsData[0])) + cshiftsLen := *(*C.int)(unsafe.Pointer(&shiftsLen)) + cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) + cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) + C.atg_roll(ptr, self, cshiftsDataPtr, cshiftsLen, cdimsDataPtr, cdimsLen) +} +func AtgRot90(ptr *Ctensor, self Ctensor, k int64, dimsData []int64, dimsLen int) { + ck := *(*C.int64_t)(unsafe.Pointer(&k)) + cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) + cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) + C.atg_rot90(ptr, self, ck, cdimsDataPtr, cdimsLen) +} +func AtgRound(ptr *Ctensor, self Ctensor) { + C.atg_round(ptr, self) +} +func AtgRound_(ptr *Ctensor, self Ctensor) { + C.atg_round_(ptr, self) +} +func AtgRoundOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_round_out(ptr, out, self) +} 
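Every wrapper in this hunk follows the same calling convention: the result handle is written through the leading ptr *Ctensor argument, numeric flags are plain Go int32/int64 values that the body reinterprets as C.int/C.int64_t via unsafe.Pointer, and []int64 or []Ctensor arguments are passed as a data slice plus an explicit length. The caller-side sketch below illustrates that convention with AtgRand, AtgRoll and AtgRound_ from this file; the import path and the kind/device integer codes (6 for float32, -1 for CPU) are assumptions for illustration only, and error handling and tensor cleanup are omitted.

package main

import "github.com/sugarme/gotch/libtch"

func main() {
	// Destination slot that AtgRand fills with the new C tensor handle.
	var x libtch.Ctensor
	size := []int64{2, 3}
	// []int64 arguments travel as a (slice, length) pair.
	libtch.AtgRand(&x, size, len(size), 6, -1) // kind 6 = float32, device -1 = CPU (assumed codes)

	// Roll x by one position along dim 0; the Go ints/int64s are
	// reinterpreted as C.int/C.int64_t inside the generated wrapper.
	var rolled libtch.Ctensor
	shifts, dims := []int64{1}, []int64{0}
	libtch.AtgRoll(&rolled, x, shifts, len(shifts), dims, len(dims))

	// Trailing-underscore wrappers are the in-place variants; they mutate
	// their input and (by the same convention, assumed here) still write a
	// handle through the first slot.
	var roundedSlot libtch.Ctensor
	libtch.AtgRound_(&roundedSlot, rolled)
	_ = roundedSlot
}

Passing the address of a Go-declared Ctensor is legal under cgo's pointer-passing rules because the slot holds a C handle rather than a Go pointer, so no extra C-side allocation is strictly required for a single result.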
+func AtgRrelu(ptr *Ctensor, self Ctensor, training int32) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + C.atg_rrelu(ptr, self, ctraining) +} +func AtgRrelu_(ptr *Ctensor, self Ctensor, training int32) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + C.atg_rrelu_(ptr, self, ctraining) +} +func AtgRreluWithNoise(ptr *Ctensor, self Ctensor, noise Ctensor, training int32) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + C.atg_rrelu_with_noise(ptr, self, noise, ctraining) +} +func AtgRreluWithNoise_(ptr *Ctensor, self Ctensor, noise Ctensor, training int32) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + C.atg_rrelu_with_noise_(ptr, self, noise, ctraining) +} +func AtgRreluWithNoiseBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, noise Ctensor, lower Cscalar, upper Cscalar, training int32, selfIsResult int32) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) + C.atg_rrelu_with_noise_backward(ptr, gradOutput, self, noise, lower, upper, ctraining, cselfIsResult) +} +func AtgRreluWithNoiseOut(ptr *Ctensor, out Ctensor, self Ctensor, noise Ctensor, training int32) { + ctraining := *(*C.int)(unsafe.Pointer(&training)) + C.atg_rrelu_with_noise_out(ptr, out, self, noise, ctraining) +} +func AtgRsqrt(ptr *Ctensor, self Ctensor) { + C.atg_rsqrt(ptr, self) +} +func AtgRsqrt_(ptr *Ctensor, self Ctensor) { + C.atg_rsqrt_(ptr, self) +} +func AtgRsqrtOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_rsqrt_out(ptr, out, self) +} +func AtgRsub(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_rsub(ptr, self, other) +} +func AtgRsub1(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_rsub1(ptr, self, other) } -func AtgScalarTensor(ptr *Ctensor, s Cscalar, optionsKind int32, optionsDevice int32){ -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_scalar_tensor(ptr, s , coptionsKind, coptionsDevice) +func AtgScalarTensor(ptr *Ctensor, s Cscalar, optionsKind int32, optionsDevice int32) { + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_scalar_tensor(ptr, s, coptionsKind, coptionsDevice) } -func AtgScatter(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_scatter(ptr, self, cdim, index, src) +func AtgScatter(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_scatter(ptr, self, cdim, index, src) } -func AtgScatter1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_scatter1(ptr, self, cdim, index, value ) +func AtgScatter1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_scatter1(ptr, self, cdim, index, value) } -func AtgScatter_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_scatter_(ptr, self, cdim, index, src) -} -func AtgScatter1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_scatter_1(ptr, self, cdim, index, value ) +func AtgScatter_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_scatter_(ptr, self, cdim, index, src) +} +func AtgScatter1_(ptr 
*Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_scatter_1(ptr, self, cdim, index, value) } -func AtgScatterAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_scatter_add(ptr, self, cdim, index, src) +func AtgScatterAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_scatter_add(ptr, self, cdim, index, src) } -func AtgScatterAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_scatter_add_(ptr, self, cdim, index, src) -} -func AtgSelect(ptr *Ctensor, self Ctensor, dim int64, index int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cindex := *(*C.int64_t)(unsafe.Pointer(&index)) -C.atg_select(ptr, self, cdim, cindex) +func AtgScatterAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_scatter_add_(ptr, self, cdim, index, src) +} +func AtgSelect(ptr *Ctensor, self Ctensor, dim int64, index int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cindex := *(*C.int64_t)(unsafe.Pointer(&index)) + C.atg_select(ptr, self, cdim, cindex) } -func AtgSelu(ptr *Ctensor, self Ctensor){ -C.atg_selu(ptr, self) -} -func AtgSelu_(ptr *Ctensor, self Ctensor){ -C.atg_selu_(ptr, self) -} -func AtgSet_(ptr *Ctensor, self Ctensor){ -C.atg_set_(ptr, self) -} -func AtgSet1_(ptr *Ctensor, self Ctensor, source Ctensor){ -C.atg_set_1(ptr, self, source) -} -func AtgSetRequiresGrad(ptr *Ctensor, self Ctensor, r int32){ -cr := *(*C.int)(unsafe.Pointer(&r)) -C.atg_set_requires_grad(ptr, self, cr) -} -func AtgSigmoid(ptr *Ctensor, self Ctensor){ -C.atg_sigmoid(ptr, self) -} -func AtgSigmoid_(ptr *Ctensor, self Ctensor){ -C.atg_sigmoid_(ptr, self) -} -func AtgSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){ -C.atg_sigmoid_backward(ptr, gradOutput, output) -} -func AtgSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){ -C.atg_sigmoid_backward_out(ptr, gradInput, gradOutput, output) -} -func AtgSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_sigmoid_out(ptr, out, self) -} -func AtgSign(ptr *Ctensor, self Ctensor){ -C.atg_sign(ptr, self) -} -func AtgSign_(ptr *Ctensor, self Ctensor){ -C.atg_sign_(ptr, self) -} -func AtgSignOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_sign_out(ptr, out, self) -} -func AtgSin(ptr *Ctensor, self Ctensor){ -C.atg_sin(ptr, self) -} -func AtgSin_(ptr *Ctensor, self Ctensor){ -C.atg_sin_(ptr, self) -} -func AtgSinOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_sin_out(ptr, out, self) -} -func AtgSinh(ptr *Ctensor, self Ctensor){ -C.atg_sinh(ptr, self) -} -func AtgSinh_(ptr *Ctensor, self Ctensor){ -C.atg_sinh_(ptr, self) -} -func AtgSinhOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_sinh_out(ptr, out, self) -} -func AtgSlice(ptr *Ctensor, self Ctensor, dim int64, start int64, end int64, step int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cstart := *(*C.int64_t)(unsafe.Pointer(&start)) -cend := *(*C.int64_t)(unsafe.Pointer(&end)) -cstep := *(*C.int64_t)(unsafe.Pointer(&step)) -C.atg_slice(ptr, self, cdim, cstart, cend, cstep) -} -func AtgSlogdet(ptr *Ctensor, self Ctensor){ -C.atg_slogdet(ptr, self) -} -func AtgSlowConv3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, 
strideLen int, paddingData []int64, paddingLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_slow_conv3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgSlowConv3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -C.atg_slow_conv3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgSlowConvDilated2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_slow_conv_dilated2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvDilated3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_slow_conv_dilated3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) 
-cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_slow_conv_transpose2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose2dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_slow_conv_transpose2d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_slow_conv_transpose3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ -ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) -ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) -cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) -cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) -cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) -cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) -coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) -coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) -cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) -cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) -C.atg_slow_conv_transpose3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ -C.atg_smm(ptr, self, mat2) -} -func AtgSmoothL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_smooth_l1_loss(ptr, self, target, creduction) -} -func AtgSmoothL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_smooth_l1_loss_backward(ptr, gradOutput, self, target, creduction) -} -func AtgSmoothL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_smooth_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) -} -func AtgSmoothL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_smooth_l1_loss_out(ptr, out, self, target, creduction) -} -func AtgSoftMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_soft_margin_loss(ptr, self, target, creduction) -} -func AtgSoftMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_soft_margin_loss_backward(ptr, gradOutput, self, target, creduction) -} -func AtgSoftMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_soft_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) -} -func AtgSoftMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_soft_margin_loss_out(ptr, out, self, target, creduction) -} -func AtgSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_softmax(ptr, self, cdim, cdtype) -} -func AtgSoftplus(ptr *Ctensor, self Ctensor){ -C.atg_softplus(ptr, self) -} -func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ -C.atg_softplus_backward(ptr, gradOutput, self, beta , threshold , output) -} -func AtgSoftplusBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ -C.atg_softplus_backward_out(ptr, gradInput, gradOutput, self, beta , threshold , output) -} -func AtgSoftplusOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_softplus_out(ptr, out, self) -} -func 
AtgSoftshrink(ptr *Ctensor, self Ctensor){ -C.atg_softshrink(ptr, self) -} -func AtgSoftshrinkBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){ -C.atg_softshrink_backward(ptr, gradOutput, self, lambd ) -} -func AtgSoftshrinkBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){ -C.atg_softshrink_backward_out(ptr, gradInput, gradOutput, self, lambd ) -} -func AtgSoftshrinkOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_softshrink_out(ptr, out, self) -} -func AtgSolve(ptr *Ctensor, self Ctensor, a Ctensor){ -C.atg_solve(ptr, self, a) -} -func AtgSolveOut(ptr *Ctensor, solution Ctensor, lu Ctensor, self Ctensor, a Ctensor){ -C.atg_solve_out(ptr, solution, lu, self, a) -} -func AtgSort(ptr *Ctensor, self Ctensor, dim int64, descending int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdescending := *(*C.int)(unsafe.Pointer(&descending)) -C.atg_sort(ptr, self, cdim, cdescending) -} -func AtgSortOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, descending int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -cdescending := *(*C.int)(unsafe.Pointer(&descending)) -C.atg_sort_out(ptr, values, indices, self, cdim, cdescending) -} -func AtgSparseCooTensor(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_sparse_coo_tensor(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgSparseCooTensor1(ptr *Ctensor, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){ -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_sparse_coo_tensor1(ptr, indices, values, coptionsKind, coptionsDevice) -} -func AtgSparseCooTensor2(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_sparse_coo_tensor2(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgSparseMask(ptr *Ctensor, self Ctensor, mask Ctensor){ -C.atg_sparse_mask(ptr, self, mask) -} -func AtgSparseResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) -cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) -C.atg_sparse_resize_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) -} -func AtgSparseResizeAndClear_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) -cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) -C.atg_sparse_resize_and_clear_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) +func AtgSelu(ptr *Ctensor, self Ctensor) { + C.atg_selu(ptr, self) +} +func AtgSelu_(ptr *Ctensor, self Ctensor) { + C.atg_selu_(ptr, self) +} 
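Wrappers for operators with more than one result, such as AtgSort, AtgSolve and AtgQr in this hunk, still take a single ptr *Ctensor: the underlying C functions are assumed to write their outputs as consecutive tensor handles starting at that address, so the caller reserves one slot per output. A minimal sketch under that assumption, reusing the assumed import path and kind/device codes from the earlier example:

package main

import "github.com/sugarme/gotch/libtch"

func main() {
	var x libtch.Ctensor
	size := []int64{4}
	libtch.AtgRand(&x, size, len(size), 6, -1) // kind 6 = float32, device -1 = CPU (assumed codes)

	// AtgSort produces (values, indices): reserve two consecutive slots and
	// pass the address of the first one.
	var out [2]libtch.Ctensor
	libtch.AtgSort(&out[0], x, 0, 0) // dim = 0, descending = 0 (false)
	values, indices := out[0], out[1]
	_, _ = values, indices
}

The *Out wrappers nearby, such as AtgSortOut and AtgSolveOut, additionally take the destination tensors as explicit Ctensor arguments, as their signatures in this hunk show.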
+func AtgSet_(ptr *Ctensor, self Ctensor) { + C.atg_set_(ptr, self) +} +func AtgSet1_(ptr *Ctensor, self Ctensor, source Ctensor) { + C.atg_set_1(ptr, self, source) +} +func AtgSetRequiresGrad(ptr *Ctensor, self Ctensor, r int32) { + cr := *(*C.int)(unsafe.Pointer(&r)) + C.atg_set_requires_grad(ptr, self, cr) +} +func AtgSigmoid(ptr *Ctensor, self Ctensor) { + C.atg_sigmoid(ptr, self) +} +func AtgSigmoid_(ptr *Ctensor, self Ctensor) { + C.atg_sigmoid_(ptr, self) +} +func AtgSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor) { + C.atg_sigmoid_backward(ptr, gradOutput, output) +} +func AtgSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor) { + C.atg_sigmoid_backward_out(ptr, gradInput, gradOutput, output) +} +func AtgSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_sigmoid_out(ptr, out, self) +} +func AtgSign(ptr *Ctensor, self Ctensor) { + C.atg_sign(ptr, self) +} +func AtgSign_(ptr *Ctensor, self Ctensor) { + C.atg_sign_(ptr, self) +} +func AtgSignOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_sign_out(ptr, out, self) +} +func AtgSin(ptr *Ctensor, self Ctensor) { + C.atg_sin(ptr, self) +} +func AtgSin_(ptr *Ctensor, self Ctensor) { + C.atg_sin_(ptr, self) +} +func AtgSinOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_sin_out(ptr, out, self) +} +func AtgSinh(ptr *Ctensor, self Ctensor) { + C.atg_sinh(ptr, self) +} +func AtgSinh_(ptr *Ctensor, self Ctensor) { + C.atg_sinh_(ptr, self) +} +func AtgSinhOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_sinh_out(ptr, out, self) +} +func AtgSlice(ptr *Ctensor, self Ctensor, dim int64, start int64, end int64, step int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cstart := *(*C.int64_t)(unsafe.Pointer(&start)) + cend := *(*C.int64_t)(unsafe.Pointer(&end)) + cstep := *(*C.int64_t)(unsafe.Pointer(&step)) + C.atg_slice(ptr, self, cdim, cstart, cend, cstep) +} +func AtgSlogdet(ptr *Ctensor, self Ctensor) { + C.atg_slogdet(ptr, self) +} +func AtgSlowConv3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_slow_conv3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgSlowConv3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + C.atg_slow_conv3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgSlowConvDilated2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, 
kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_slow_conv_dilated2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvDilated3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_slow_conv_dilated3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_slow_conv_transpose2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose2dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_slow_conv_transpose2d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_slow_conv_transpose3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { + ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) + ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) + cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) + cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) + cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) + cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) + coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) + coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) + cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) + cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) + C.atg_slow_conv_transpose3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSmm(ptr *Ctensor, self Ctensor, mat2 Ctensor) { + C.atg_smm(ptr, self, mat2) +} +func AtgSmoothL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_smooth_l1_loss(ptr, self, target, creduction) +} +func AtgSmoothL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_smooth_l1_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgSmoothL1LossBackwardOut(ptr 
*Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_smooth_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgSmoothL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_smooth_l1_loss_out(ptr, out, self, target, creduction) +} +func AtgSoftMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_soft_margin_loss(ptr, self, target, creduction) +} +func AtgSoftMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_soft_margin_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgSoftMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_soft_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgSoftMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_soft_margin_loss_out(ptr, out, self, target, creduction) +} +func AtgSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_softmax(ptr, self, cdim, cdtype) +} +func AtgSoftplus(ptr *Ctensor, self Ctensor) { + C.atg_softplus(ptr, self) +} +func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor) { + C.atg_softplus_backward(ptr, gradOutput, self, beta, threshold, output) +} +func AtgSoftplusBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor) { + C.atg_softplus_backward_out(ptr, gradInput, gradOutput, self, beta, threshold, output) +} +func AtgSoftplusOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_softplus_out(ptr, out, self) +} +func AtgSoftshrink(ptr *Ctensor, self Ctensor) { + C.atg_softshrink(ptr, self) +} +func AtgSoftshrinkBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar) { + C.atg_softshrink_backward(ptr, gradOutput, self, lambd) +} +func AtgSoftshrinkBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar) { + C.atg_softshrink_backward_out(ptr, gradInput, gradOutput, self, lambd) +} +func AtgSoftshrinkOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_softshrink_out(ptr, out, self) +} +func AtgSolve(ptr *Ctensor, self Ctensor, a Ctensor) { + C.atg_solve(ptr, self, a) +} +func AtgSolveOut(ptr *Ctensor, solution Ctensor, lu Ctensor, self Ctensor, a Ctensor) { + C.atg_solve_out(ptr, solution, lu, self, a) +} +func AtgSort(ptr *Ctensor, self Ctensor, dim int64, descending int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdescending := *(*C.int)(unsafe.Pointer(&descending)) + C.atg_sort(ptr, self, cdim, cdescending) +} +func AtgSortOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, descending int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + cdescending := *(*C.int)(unsafe.Pointer(&descending)) + C.atg_sort_out(ptr, values, indices, self, cdim, cdescending) +} +func 
AtgSparseCooTensor(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_sparse_coo_tensor(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgSparseCooTensor1(ptr *Ctensor, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32) { + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_sparse_coo_tensor1(ptr, indices, values, coptionsKind, coptionsDevice) +} +func AtgSparseCooTensor2(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_sparse_coo_tensor2(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgSparseMask(ptr *Ctensor, self Ctensor, mask Ctensor) { + C.atg_sparse_mask(ptr, self, mask) +} +func AtgSparseResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) + cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) + C.atg_sparse_resize_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) +} +func AtgSparseResizeAndClear_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) + cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) + C.atg_sparse_resize_and_clear_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) } - -func AtgSqrt(ptr *Ctensor, self Ctensor){ -C.atg_sqrt(ptr, self) -} -func AtgSqrt_(ptr *Ctensor, self Ctensor){ -C.atg_sqrt_(ptr, self) -} -func AtgSqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_sqrt_out(ptr, out, self) -} -func AtgSquare(ptr *Ctensor, self Ctensor){ -C.atg_square(ptr, self) -} -func AtgSquare_(ptr *Ctensor, self Ctensor){ -C.atg_square_(ptr, self) -} -func AtgSqueeze(ptr *Ctensor, self Ctensor){ -C.atg_squeeze(ptr, self) -} -func AtgSqueeze1(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_squeeze1(ptr, self, cdim) -} -func AtgSqueeze_(ptr *Ctensor, self Ctensor){ -C.atg_squeeze_(ptr, self) -} -func AtgSqueeze1_(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_squeeze_1(ptr, self, cdim) -} -func AtgSspaddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ -C.atg_sspaddmm(ptr, self, mat1, mat2) -} -func AtgSspaddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ -C.atg_sspaddmm_out(ptr, out, self, mat1, mat2) -} -func AtgStack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_stack(ptr, ctensorsDataPtr, 
ctensorsLen, cdim) -} -func AtgStackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ -ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) -ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_stack_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) -} -func AtgStd(ptr *Ctensor, self Ctensor, unbiased int32){ -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -C.atg_std(ptr, self, cunbiased) -} -func AtgStd1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_std1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) -} -func AtgStdMean(ptr *Ctensor, self Ctensor, unbiased int32){ -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -C.atg_std_mean(ptr, self, cunbiased) -} -func AtgStdMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_std_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) -} -func AtgStdOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_std_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) -} -func AtgStft(ptr *Ctensor, self Ctensor, nFft int64, hopLength int64, winLength int64, window Ctensor, normalized int32, onesided int32){ -cnFft := *(*C.int64_t)(unsafe.Pointer(&nFft)) -chopLength := *(*C.int64_t)(unsafe.Pointer(&hopLength)) -cwinLength := *(*C.int64_t)(unsafe.Pointer(&winLength)) -cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) -conesided := *(*C.int)(unsafe.Pointer(&onesided)) -C.atg_stft(ptr, self, cnFft, chopLength, cwinLength, window, cnormalized, conesided) -} -func AtgSub(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_sub(ptr, self, other) -} -func AtgSub1(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_sub1(ptr, self, other ) -} -func AtgSub_(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_sub_(ptr, self, other) -} -func AtgSub1_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_sub_1(ptr, self, other ) -} -func AtgSubOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_sub_out(ptr, out, self, other) -} -func AtgSum(ptr *Ctensor, self Ctensor, dtype int32){ -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_sum(ptr, self, cdtype) +func AtgSqrt(ptr *Ctensor, self Ctensor) { + C.atg_sqrt(ptr, self) } -func AtgSum1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_sum1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgSumOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := 
*(*C.int)(unsafe.Pointer(&dimLen)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -cdtype := *(*C.int)(unsafe.Pointer(&dtype)) -C.atg_sum_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgSumToSize(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_sum_to_size(ptr, self, csizeDataPtr, csizeLen) -} -func AtgSvd(ptr *Ctensor, self Ctensor, some int32, computeUv int32){ -csome := *(*C.int)(unsafe.Pointer(&some)) -ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) -C.atg_svd(ptr, self, csome, ccomputeUv) -} -func AtgSvdOut(ptr *Ctensor, u Ctensor, s Ctensor, v Ctensor, self Ctensor, some int32, computeUv int32){ -csome := *(*C.int)(unsafe.Pointer(&some)) -ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) -C.atg_svd_out(ptr, u, s, v, self, csome, ccomputeUv) -} -func AtgSymeig(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){ -ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_symeig(ptr, self, ceigenvectors, cupper) -} -func AtgSymeigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32, upper int32){ -ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) -cupper := *(*C.int)(unsafe.Pointer(&upper)) -C.atg_symeig_out(ptr, e, v, self, ceigenvectors, cupper) -} -func AtgT(ptr *Ctensor, self Ctensor){ -C.atg_t(ptr, self) -} -func AtgT_(ptr *Ctensor, self Ctensor){ -C.atg_t_(ptr, self) -} -func AtgTake(ptr *Ctensor, self Ctensor, index Ctensor){ -C.atg_take(ptr, self, index) -} -func AtgTakeOut(ptr *Ctensor, out Ctensor, self Ctensor, index Ctensor){ -C.atg_take_out(ptr, out, self, index) -} -func AtgTan(ptr *Ctensor, self Ctensor){ -C.atg_tan(ptr, self) -} -func AtgTan_(ptr *Ctensor, self Ctensor){ -C.atg_tan_(ptr, self) -} -func AtgTanOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_tan_out(ptr, out, self) -} -func AtgTanh(ptr *Ctensor, self Ctensor){ -C.atg_tanh(ptr, self) -} -func AtgTanh_(ptr *Ctensor, self Ctensor){ -C.atg_tanh_(ptr, self) -} -func AtgTanhBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){ -C.atg_tanh_backward(ptr, gradOutput, output) -} -func AtgTanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){ -C.atg_tanh_backward_out(ptr, gradInput, gradOutput, output) -} -func AtgTanhOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_tanh_out(ptr, out, self) -} -func AtgTensordot(ptr *Ctensor, self Ctensor, other Ctensor, dimsSelfData []int64, dimsSelfLen int, dimsOtherData []int64, dimsOtherLen int){ -cdimsSelfDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsSelfData[0])) -cdimsSelfLen := *(*C.int)(unsafe.Pointer(&dimsSelfLen)) -cdimsOtherDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsOtherData[0])) -cdimsOtherLen := *(*C.int)(unsafe.Pointer(&dimsOtherLen)) -C.atg_tensordot(ptr, self, other, cdimsSelfDataPtr, cdimsSelfLen, cdimsOtherDataPtr, cdimsOtherLen) -} -func AtgThreshold(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){ -C.atg_threshold(ptr, self, threshold , value ) -} -func AtgThreshold_(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){ -C.atg_threshold_(ptr, self, threshold , value ) -} -func AtgThresholdBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, threshold Cscalar){ -C.atg_threshold_backward(ptr, gradOutput, self, threshold ) -} -func AtgThresholdOut(ptr *Ctensor, out Ctensor, self Ctensor, threshold Cscalar, value Cscalar){ -C.atg_threshold_out(ptr, out, 
self, threshold , value )
-}
-func AtgTo(ptr *Ctensor, self Ctensor, device int32){
-cdevice := *(*C.int)(unsafe.Pointer(&device))
-C.atg_to(ptr, self, cdevice)
-}
-func AtgTo1(ptr *Ctensor, self Ctensor, optionsKind int32, optionsDevice int32, nonBlocking int32, copy int32){
-coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
-coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
-cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-ccopy := *(*C.int)(unsafe.Pointer(&copy))
-C.atg_to1(ptr, self, coptionsKind, coptionsDevice, cnonBlocking, ccopy)
-}
-func AtgTo2(ptr *Ctensor, self Ctensor, dtype int32, nonBlocking int32, copy int32){
-cdtype := *(*C.int)(unsafe.Pointer(&dtype))
-cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-ccopy := *(*C.int)(unsafe.Pointer(&copy))
-C.atg_to2(ptr, self, cdtype, cnonBlocking, ccopy)
-}
-func AtgTo3(ptr *Ctensor, self Ctensor, other Ctensor, nonBlocking int32, copy int32){
-cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-ccopy := *(*C.int)(unsafe.Pointer(&copy))
-C.atg_to3(ptr, self, other, cnonBlocking, ccopy)
-}
-func AtgTo4(ptr *Ctensor, self Ctensor, device int32, dtype int32, nonBlocking int32, copy int32){
-cdevice := *(*C.int)(unsafe.Pointer(&device))
-cdtype := *(*C.int)(unsafe.Pointer(&dtype))
-cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-ccopy := *(*C.int)(unsafe.Pointer(&copy))
-C.atg_to4(ptr, self, cdevice, cdtype, cnonBlocking, ccopy)
-}
-func AtgToDense(ptr *Ctensor, self Ctensor){
-C.atg_to_dense(ptr, self)
-}
-func AtgToDenseBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
-C.atg_to_dense_backward(ptr, grad, input)
-}
-func AtgToMkldnn(ptr *Ctensor, self Ctensor){
-C.atg_to_mkldnn(ptr, self)
-}
-func AtgToMkldnnBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
-C.atg_to_mkldnn_backward(ptr, grad, input)
-}
-func AtgToSparse(ptr *Ctensor, self Ctensor){
-C.atg_to_sparse(ptr, self)
-}
-func AtgToSparse1(ptr *Ctensor, self Ctensor, sparseDim int64){
-csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
-C.atg_to_sparse1(ptr, self, csparseDim)
-}
-func AtgTopk(ptr *Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
-ck := *(*C.int64_t)(unsafe.Pointer(&k))
-cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
-clargest := *(*C.int)(unsafe.Pointer(&largest))
-csorted := *(*C.int)(unsafe.Pointer(&sorted))
-C.atg_topk(ptr, self, ck, cdim, clargest, csorted)
-}
-func AtgTopkOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
-ck := *(*C.int64_t)(unsafe.Pointer(&k))
-cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
-clargest := *(*C.int)(unsafe.Pointer(&largest))
-csorted := *(*C.int)(unsafe.Pointer(&sorted))
-C.atg_topk_out(ptr, values, indices, self, ck, cdim, clargest, csorted)
-}
-func AtgTotype(ptr *Ctensor, self Ctensor, scalarType int32){
-cscalarType := *(*C.int)(unsafe.Pointer(&scalarType))
-C.atg_totype(ptr, self, cscalarType)
-}
-func AtgTrace(ptr *Ctensor, self Ctensor){
-C.atg_trace(ptr, self)
-}
-func AtgTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
-cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
-cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
-C.atg_transpose(ptr, self, cdim0, cdim1)
-}
-func AtgTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
-cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
-cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
-C.atg_transpose_(ptr, self, cdim0, cdim1)
-}
-func AtgTrapz(ptr *Ctensor, y Ctensor, x Ctensor, dim int64){
-cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
-C.atg_trapz(ptr, y, x, cdim) -} -func AtgTrapz1(ptr *Ctensor, y Ctensor, dx float64, dim int64){ -cdx := *(*C.double)(unsafe.Pointer(&dx)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_trapz1(ptr, y, cdx, cdim) -} -func AtgTriangularSolve(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) -cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) -C.atg_triangular_solve(ptr, self, a, cupper, ctranspose, cunitriangular) -} -func AtgTriangularSolveOut(ptr *Ctensor, x Ctensor, m Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){ -cupper := *(*C.int)(unsafe.Pointer(&upper)) -ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) -cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) -C.atg_triangular_solve_out(ptr, x, m, self, a, cupper, ctranspose, cunitriangular) -} -func AtgTril(ptr *Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_tril(ptr, self, cdiagonal) -} -func AtgTril_(ptr *Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_tril_(ptr, self, cdiagonal) -} -func AtgTrilIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){ -crow := *(*C.int64_t)(unsafe.Pointer(&row)) -ccol := *(*C.int64_t)(unsafe.Pointer(&col)) -coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_tril_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) -} -func AtgTrilOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_tril_out(ptr, out, self, cdiagonal) -} -func AtgTripletMarginLoss(ptr *Ctensor, anchor Ctensor, positive Ctensor, negative Ctensor, margin float64, p float64, eps float64, swap int32, reduction int64){ -cmargin := *(*C.double)(unsafe.Pointer(&margin)) -cp := *(*C.double)(unsafe.Pointer(&p)) -ceps := *(*C.double)(unsafe.Pointer(&eps)) -cswap := *(*C.int)(unsafe.Pointer(&swap)) -creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) -C.atg_triplet_margin_loss(ptr, anchor, positive, negative, cmargin, cp, ceps, cswap, creduction) -} -func AtgTriu(ptr *Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_triu(ptr, self, cdiagonal) -} -func AtgTriu_(ptr *Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_triu_(ptr, self, cdiagonal) -} -func AtgTriuIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){ -crow := *(*C.int64_t)(unsafe.Pointer(&row)) -ccol := *(*C.int64_t)(unsafe.Pointer(&col)) -coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_triu_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) -} -func AtgTriuOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ -cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) -C.atg_triu_out(ptr, out, self, cdiagonal) -} -func AtgTrueDivide(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_true_divide(ptr, self, other) -} -func AtgTrueDivide1(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_true_divide1(ptr, self, other ) -} -func AtgTrueDivide_(ptr 
*Ctensor, self Ctensor, other Ctensor){ -C.atg_true_divide_(ptr, self, other) -} -func AtgTrueDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){ -C.atg_true_divide_1(ptr, self, other ) -} -func AtgTrueDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ -C.atg_true_divide_out(ptr, out, self, other) -} -func AtgTrunc(ptr *Ctensor, self Ctensor){ -C.atg_trunc(ptr, self) -} -func AtgTrunc_(ptr *Ctensor, self Ctensor){ -C.atg_trunc_(ptr, self) -} -func AtgTruncOut(ptr *Ctensor, out Ctensor, self Ctensor){ -C.atg_trunc_out(ptr, out, self) -} -func AtgTypeAs(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_type_as(ptr, self, other) +func AtgSqrt_(ptr *Ctensor, self Ctensor) { + C.atg_sqrt_(ptr, self) +} +func AtgSqrtOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_sqrt_out(ptr, out, self) +} +func AtgSquare(ptr *Ctensor, self Ctensor) { + C.atg_square(ptr, self) +} +func AtgSquare_(ptr *Ctensor, self Ctensor) { + C.atg_square_(ptr, self) +} +func AtgSqueeze(ptr *Ctensor, self Ctensor) { + C.atg_squeeze(ptr, self) +} +func AtgSqueeze1(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_squeeze1(ptr, self, cdim) +} +func AtgSqueeze_(ptr *Ctensor, self Ctensor) { + C.atg_squeeze_(ptr, self) +} +func AtgSqueeze1_(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_squeeze_1(ptr, self, cdim) +} +func AtgSspaddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { + C.atg_sspaddmm(ptr, self, mat1, mat2) +} +func AtgSspaddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { + C.atg_sspaddmm_out(ptr, out, self, mat1, mat2) +} +func AtgStack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_stack(ptr, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgStackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { + ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) + ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_stack_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgStd(ptr *Ctensor, self Ctensor, unbiased int32) { + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + C.atg_std(ptr, self, cunbiased) +} +func AtgStd1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_std1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStdMean(ptr *Ctensor, self Ctensor, unbiased int32) { + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + C.atg_std_mean(ptr, self, cunbiased) +} +func AtgStdMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_std_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStdOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { + cdimDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_std_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStft(ptr *Ctensor, self Ctensor, nFft int64, hopLength int64, winLength int64, window Ctensor, normalized int32, onesided int32) { + cnFft := *(*C.int64_t)(unsafe.Pointer(&nFft)) + chopLength := *(*C.int64_t)(unsafe.Pointer(&hopLength)) + cwinLength := *(*C.int64_t)(unsafe.Pointer(&winLength)) + cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) + conesided := *(*C.int)(unsafe.Pointer(&onesided)) + C.atg_stft(ptr, self, cnFft, chopLength, cwinLength, window, cnormalized, conesided) +} +func AtgSub(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_sub(ptr, self, other) +} +func AtgSub1(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_sub1(ptr, self, other) +} +func AtgSub_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_sub_(ptr, self, other) +} +func AtgSub1_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_sub_1(ptr, self, other) +} +func AtgSubOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_sub_out(ptr, out, self, other) +} +func AtgSum(ptr *Ctensor, self Ctensor, dtype int32) { + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_sum(ptr, self, cdtype) +} +func AtgSum1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_sum1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgSumOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + cdtype := *(*C.int)(unsafe.Pointer(&dtype)) + C.atg_sum_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgSumToSize(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_sum_to_size(ptr, self, csizeDataPtr, csizeLen) +} +func AtgSvd(ptr *Ctensor, self Ctensor, some int32, computeUv int32) { + csome := *(*C.int)(unsafe.Pointer(&some)) + ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) + C.atg_svd(ptr, self, csome, ccomputeUv) +} +func AtgSvdOut(ptr *Ctensor, u Ctensor, s Ctensor, v Ctensor, self Ctensor, some int32, computeUv int32) { + csome := *(*C.int)(unsafe.Pointer(&some)) + ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) + C.atg_svd_out(ptr, u, s, v, self, csome, ccomputeUv) +} +func AtgSymeig(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32) { + ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_symeig(ptr, self, ceigenvectors, cupper) +} +func AtgSymeigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32, upper int32) { + ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) + cupper := *(*C.int)(unsafe.Pointer(&upper)) + C.atg_symeig_out(ptr, e, v, self, ceigenvectors, cupper) +} +func AtgT(ptr *Ctensor, self Ctensor) { + C.atg_t(ptr, self) +} +func AtgT_(ptr *Ctensor, self Ctensor) { + C.atg_t_(ptr, self) +} +func AtgTake(ptr 
*Ctensor, self Ctensor, index Ctensor) {
+	C.atg_take(ptr, self, index)
+}
+func AtgTakeOut(ptr *Ctensor, out Ctensor, self Ctensor, index Ctensor) {
+	C.atg_take_out(ptr, out, self, index)
+}
+func AtgTan(ptr *Ctensor, self Ctensor) {
+	C.atg_tan(ptr, self)
+}
+func AtgTan_(ptr *Ctensor, self Ctensor) {
+	C.atg_tan_(ptr, self)
+}
+func AtgTanOut(ptr *Ctensor, out Ctensor, self Ctensor) {
+	C.atg_tan_out(ptr, out, self)
+}
+func AtgTanh(ptr *Ctensor, self Ctensor) {
+	C.atg_tanh(ptr, self)
+}
+func AtgTanh_(ptr *Ctensor, self Ctensor) {
+	C.atg_tanh_(ptr, self)
+}
+func AtgTanhBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor) {
+	C.atg_tanh_backward(ptr, gradOutput, output)
+}
+func AtgTanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor) {
+	C.atg_tanh_backward_out(ptr, gradInput, gradOutput, output)
+}
+func AtgTanhOut(ptr *Ctensor, out Ctensor, self Ctensor) {
+	C.atg_tanh_out(ptr, out, self)
+}
+func AtgTensordot(ptr *Ctensor, self Ctensor, other Ctensor, dimsSelfData []int64, dimsSelfLen int, dimsOtherData []int64, dimsOtherLen int) {
+	cdimsSelfDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsSelfData[0]))
+	cdimsSelfLen := *(*C.int)(unsafe.Pointer(&dimsSelfLen))
+	cdimsOtherDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsOtherData[0]))
+	cdimsOtherLen := *(*C.int)(unsafe.Pointer(&dimsOtherLen))
+	C.atg_tensordot(ptr, self, other, cdimsSelfDataPtr, cdimsSelfLen, cdimsOtherDataPtr, cdimsOtherLen)
+}
+func AtgThreshold(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar) {
+	C.atg_threshold(ptr, self, threshold, value)
+}
+func AtgThreshold_(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar) {
+	C.atg_threshold_(ptr, self, threshold, value)
+}
+func AtgThresholdBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, threshold Cscalar) {
+	C.atg_threshold_backward(ptr, gradOutput, self, threshold)
+}
+func AtgThresholdOut(ptr *Ctensor, out Ctensor, self Ctensor, threshold Cscalar, value Cscalar) {
+	C.atg_threshold_out(ptr, out, self, threshold, value)
+}
+func AtgTo(ptr *Ctensor, self Ctensor, device int32) {
+	cdevice := *(*C.int)(unsafe.Pointer(&device))
+	C.atg_to(ptr, self, cdevice)
+}
+func AtgTo1(ptr *Ctensor, self Ctensor, optionsKind int32, optionsDevice int32, nonBlocking int32, copy int32) {
+	coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+	coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+	ccopy := *(*C.int)(unsafe.Pointer(&copy))
+	C.atg_to1(ptr, self, coptionsKind, coptionsDevice, cnonBlocking, ccopy)
+}
+func AtgTo2(ptr *Ctensor, self Ctensor, dtype int32, nonBlocking int32, copy int32) {
+	cdtype := *(*C.int)(unsafe.Pointer(&dtype))
+	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+	ccopy := *(*C.int)(unsafe.Pointer(&copy))
+	C.atg_to2(ptr, self, cdtype, cnonBlocking, ccopy)
+}
+func AtgTo3(ptr *Ctensor, self Ctensor, other Ctensor, nonBlocking int32, copy int32) {
+	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+	ccopy := *(*C.int)(unsafe.Pointer(&copy))
+	C.atg_to3(ptr, self, other, cnonBlocking, ccopy)
+}
+func AtgTo4(ptr *Ctensor, self Ctensor, device int32, dtype int32, nonBlocking int32, copy int32) {
+	cdevice := *(*C.int)(unsafe.Pointer(&device))
+	cdtype := *(*C.int)(unsafe.Pointer(&dtype))
+	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+	ccopy := *(*C.int)(unsafe.Pointer(&copy))
+	C.atg_to4(ptr, self, cdevice, cdtype, cnonBlocking, ccopy)
+}
+func AtgToDense(ptr *Ctensor, self Ctensor) {
+	C.atg_to_dense(ptr, self)
+}
+func AtgToDenseBackward(ptr *Ctensor, grad Ctensor, input Ctensor) { + C.atg_to_dense_backward(ptr, grad, input) +} +func AtgToMkldnn(ptr *Ctensor, self Ctensor) { + C.atg_to_mkldnn(ptr, self) +} +func AtgToMkldnnBackward(ptr *Ctensor, grad Ctensor, input Ctensor) { + C.atg_to_mkldnn_backward(ptr, grad, input) +} +func AtgToSparse(ptr *Ctensor, self Ctensor) { + C.atg_to_sparse(ptr, self) +} +func AtgToSparse1(ptr *Ctensor, self Ctensor, sparseDim int64) { + csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) + C.atg_to_sparse1(ptr, self, csparseDim) +} +func AtgTopk(ptr *Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32) { + ck := *(*C.int64_t)(unsafe.Pointer(&k)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + clargest := *(*C.int)(unsafe.Pointer(&largest)) + csorted := *(*C.int)(unsafe.Pointer(&sorted)) + C.atg_topk(ptr, self, ck, cdim, clargest, csorted) +} +func AtgTopkOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32) { + ck := *(*C.int64_t)(unsafe.Pointer(&k)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + clargest := *(*C.int)(unsafe.Pointer(&largest)) + csorted := *(*C.int)(unsafe.Pointer(&sorted)) + C.atg_topk_out(ptr, values, indices, self, ck, cdim, clargest, csorted) +} +func AtgTotype(ptr *Ctensor, self Ctensor, scalarType int32) { + cscalarType := *(*C.int)(unsafe.Pointer(&scalarType)) + C.atg_totype(ptr, self, cscalarType) +} +func AtgTrace(ptr *Ctensor, self Ctensor) { + C.atg_trace(ptr, self) +} +func AtgTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { + cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) + cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) + C.atg_transpose(ptr, self, cdim0, cdim1) +} +func AtgTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { + cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) + cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) + C.atg_transpose_(ptr, self, cdim0, cdim1) +} +func AtgTrapz(ptr *Ctensor, y Ctensor, x Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_trapz(ptr, y, x, cdim) +} +func AtgTrapz1(ptr *Ctensor, y Ctensor, dx float64, dim int64) { + cdx := *(*C.double)(unsafe.Pointer(&dx)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_trapz1(ptr, y, cdx, cdim) +} +func AtgTriangularSolve(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) + cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) + C.atg_triangular_solve(ptr, self, a, cupper, ctranspose, cunitriangular) +} +func AtgTriangularSolveOut(ptr *Ctensor, x Ctensor, m Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32) { + cupper := *(*C.int)(unsafe.Pointer(&upper)) + ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) + cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) + C.atg_triangular_solve_out(ptr, x, m, self, a, cupper, ctranspose, cunitriangular) +} +func AtgTril(ptr *Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + C.atg_tril(ptr, self, cdiagonal) +} +func AtgTril_(ptr *Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + C.atg_tril_(ptr, self, cdiagonal) +} +func AtgTrilIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32) { + crow := *(*C.int64_t)(unsafe.Pointer(&row)) + ccol := 
*(*C.int64_t)(unsafe.Pointer(&col)) + coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_tril_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) +} +func AtgTrilOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + C.atg_tril_out(ptr, out, self, cdiagonal) +} +func AtgTripletMarginLoss(ptr *Ctensor, anchor Ctensor, positive Ctensor, negative Ctensor, margin float64, p float64, eps float64, swap int32, reduction int64) { + cmargin := *(*C.double)(unsafe.Pointer(&margin)) + cp := *(*C.double)(unsafe.Pointer(&p)) + ceps := *(*C.double)(unsafe.Pointer(&eps)) + cswap := *(*C.int)(unsafe.Pointer(&swap)) + creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) + C.atg_triplet_margin_loss(ptr, anchor, positive, negative, cmargin, cp, ceps, cswap, creduction) +} +func AtgTriu(ptr *Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + C.atg_triu(ptr, self, cdiagonal) +} +func AtgTriu_(ptr *Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + C.atg_triu_(ptr, self, cdiagonal) +} +func AtgTriuIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32) { + crow := *(*C.int64_t)(unsafe.Pointer(&row)) + ccol := *(*C.int64_t)(unsafe.Pointer(&col)) + coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_triu_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) +} +func AtgTriuOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64) { + cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) + C.atg_triu_out(ptr, out, self, cdiagonal) +} +func AtgTrueDivide(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_true_divide(ptr, self, other) +} +func AtgTrueDivide1(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_true_divide1(ptr, self, other) +} +func AtgTrueDivide_(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_true_divide_(ptr, self, other) +} +func AtgTrueDivide1_(ptr *Ctensor, self Ctensor, other Cscalar) { + C.atg_true_divide_1(ptr, self, other) +} +func AtgTrueDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { + C.atg_true_divide_out(ptr, out, self, other) +} +func AtgTrunc(ptr *Ctensor, self Ctensor) { + C.atg_trunc(ptr, self) +} +func AtgTrunc_(ptr *Ctensor, self Ctensor) { + C.atg_trunc_(ptr, self) +} +func AtgTruncOut(ptr *Ctensor, out Ctensor, self Ctensor) { + C.atg_trunc_out(ptr, out, self) +} +func AtgTypeAs(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_type_as(ptr, self, other) } -func AtgUnfold(ptr *Ctensor, self Ctensor, dimension int64, size int64, step int64){ -cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) -csize := *(*C.int64_t)(unsafe.Pointer(&size)) -cstep := *(*C.int64_t)(unsafe.Pointer(&step)) -C.atg_unfold(ptr, self, cdimension, csize, cstep) +func AtgUnfold(ptr *Ctensor, self Ctensor, dimension int64, size int64, step int64) { + cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) + csize := *(*C.int64_t)(unsafe.Pointer(&size)) + cstep := *(*C.int64_t)(unsafe.Pointer(&step)) + C.atg_unfold(ptr, self, cdimension, csize, cstep) } -func AtgUniform_(ptr *Ctensor, self Ctensor, from float64, to float64){ -cfrom := *(*C.double)(unsafe.Pointer(&from)) -cto := 
*(*C.double)(unsafe.Pointer(&to)) -C.atg_uniform_(ptr, self, cfrom, cto) +func AtgUniform_(ptr *Ctensor, self Ctensor, from float64, to float64) { + cfrom := *(*C.double)(unsafe.Pointer(&from)) + cto := *(*C.double)(unsafe.Pointer(&to)) + C.atg_uniform_(ptr, self, cfrom, cto) } -func AtgUniqueConsecutive(ptr *Ctensor, self Ctensor, returnInverse int32, returnCounts int32, dim int64){ -creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) -creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_unique_consecutive(ptr, self, creturnInverse, creturnCounts, cdim) +func AtgUniqueConsecutive(ptr *Ctensor, self Ctensor, returnInverse int32, returnCounts int32, dim int64) { + creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) + creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_unique_consecutive(ptr, self, creturnInverse, creturnCounts, cdim) } -func AtgUniqueDim(ptr *Ctensor, self Ctensor, dim int64, sorted int32, returnInverse int32, returnCounts int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -csorted := *(*C.int)(unsafe.Pointer(&sorted)) -creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) -creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) -C.atg_unique_dim(ptr, self, cdim, csorted, creturnInverse, creturnCounts) +func AtgUniqueDim(ptr *Ctensor, self Ctensor, dim int64, sorted int32, returnInverse int32, returnCounts int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + csorted := *(*C.int)(unsafe.Pointer(&sorted)) + creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) + creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) + C.atg_unique_dim(ptr, self, cdim, csorted, creturnInverse, creturnCounts) } -func AtgUniqueDimConsecutive(ptr *Ctensor, self Ctensor, dim int64, returnInverse int32, returnCounts int32){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) -creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) -C.atg_unique_dim_consecutive(ptr, self, cdim, creturnInverse, creturnCounts) +func AtgUniqueDimConsecutive(ptr *Ctensor, self Ctensor, dim int64, returnInverse int32, returnCounts int32) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) + creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) + C.atg_unique_dim_consecutive(ptr, self, cdim, creturnInverse, creturnCounts) } -func AtgUnsqueeze(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_unsqueeze(ptr, self, cdim) +func AtgUnsqueeze(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_unsqueeze(ptr, self, cdim) } -func AtgUnsqueeze_(ptr *Ctensor, self Ctensor, dim int64){ -cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) -C.atg_unsqueeze_(ptr, self, cdim) +func AtgUnsqueeze_(ptr *Ctensor, self Ctensor, dim int64) { + cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) + C.atg_unsqueeze_(ptr, self, cdim) } -func AtgUpsampleBicubic2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bicubic2d(ptr, 
self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBicubic2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bicubic2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) } -func AtgUpsampleBicubic2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bicubic2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBicubic2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bicubic2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) } -func AtgUpsampleBicubic2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bicubic2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBicubic2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := 
*(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bicubic2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) } -func AtgUpsampleBicubic2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bicubic2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBicubic2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bicubic2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) } -func AtgUpsampleBilinear2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bilinear2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBilinear2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bilinear2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) } -func AtgUpsampleBilinear2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bilinear2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBilinear2dBackward(ptr *Ctensor, gradOutput Ctensor, 
outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bilinear2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) } -func AtgUpsampleBilinear2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bilinear2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBilinear2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bilinear2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) } -func AtgUpsampleBilinear2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_bilinear2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgUpsampleBilinear2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_bilinear2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, 
calignCorners, cscalesH, cscalesW) } -func AtgUpsampleLinear1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_linear1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) +func AtgUpsampleLinear1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_linear1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) } -func AtgUpsampleLinear1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_linear1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) +func AtgUpsampleLinear1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_linear1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) } -func AtgUpsampleLinear1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_linear1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) +func AtgUpsampleLinear1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_linear1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) } -func AtgUpsampleLinear1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_linear1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) +func AtgUpsampleLinear1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_linear1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) } -func AtgUpsampleNearest1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_nearest1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscales) +func AtgUpsampleNearest1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_nearest1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscales) } -func AtgUpsampleNearest1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_nearest1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) +func AtgUpsampleNearest1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_nearest1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) } -func AtgUpsampleNearest1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, 
inputSizeLen int, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_nearest1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) +func AtgUpsampleNearest1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_nearest1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) } -func AtgUpsampleNearest1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cscales := *(*C.double)(unsafe.Pointer(&scales)) -C.atg_upsample_nearest1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscales) +func AtgUpsampleNearest1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cscales := *(*C.double)(unsafe.Pointer(&scales)) + C.atg_upsample_nearest1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscales) } -func AtgUpsampleNearest2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) +func AtgUpsampleNearest2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) } -func AtgUpsampleNearest2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, 
cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) +func AtgUpsampleNearest2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) } -func AtgUpsampleNearest2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) +func AtgUpsampleNearest2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) } -func AtgUpsampleNearest2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) +func AtgUpsampleNearest2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) } -func AtgUpsampleNearest3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) 
-cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgUpsampleNearest3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) } -func AtgUpsampleNearest3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgUpsampleNearest3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) } -func AtgUpsampleNearest3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgUpsampleNearest3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) } -func AtgUpsampleNearest3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_nearest3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgUpsampleNearest3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_nearest3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) } -func AtgUpsampleTrilinear3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_trilinear3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +func AtgUpsampleTrilinear3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_trilinear3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) } -func AtgUpsampleTrilinear3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := 
*(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_trilinear3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +func AtgUpsampleTrilinear3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_trilinear3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) } -func AtgUpsampleTrilinear3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) -cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_trilinear3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +func AtgUpsampleTrilinear3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) + cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_trilinear3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) } -func AtgUpsampleTrilinear3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ -coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) -coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) -calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) -cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) -cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) -cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) -C.atg_upsample_trilinear3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, 
calignCorners, cscalesD, cscalesH, cscalesW) +func AtgUpsampleTrilinear3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { + coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) + coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) + calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) + cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) + cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) + cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) + C.atg_upsample_trilinear3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) } -func AtgValues(ptr *Ctensor, self Ctensor){ -C.atg_values(ptr, self) +func AtgValues(ptr *Ctensor, self Ctensor) { + C.atg_values(ptr, self) } -func AtgVar(ptr *Ctensor, self Ctensor, unbiased int32){ -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -C.atg_var(ptr, self, cunbiased) +func AtgVar(ptr *Ctensor, self Ctensor, unbiased int32) { + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + C.atg_var(ptr, self, cunbiased) } -func AtgVar1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_var1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +func AtgVar1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_var1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) } -func AtgVarMean(ptr *Ctensor, self Ctensor, unbiased int32){ -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -C.atg_var_mean(ptr, self, cunbiased) +func AtgVarMean(ptr *Ctensor, self Ctensor, unbiased int32) { + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + C.atg_var_mean(ptr, self, cunbiased) } -func AtgVarMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_var_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +func AtgVarMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { + cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_var_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) } -func AtgVarOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ -cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) -cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) -cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) -ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) -C.atg_var_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +func AtgVarOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { + cdimDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dimData[0])) + cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) + cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) + ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) + C.atg_var_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) } -func AtgView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_view(ptr, self, csizeDataPtr, csizeLen) +func AtgView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_view(ptr, self, csizeDataPtr, csizeLen) } -func AtgViewAs(ptr *Ctensor, self Ctensor, other Ctensor){ -C.atg_view_as(ptr, self, other) +func AtgViewAs(ptr *Ctensor, self Ctensor, other Ctensor) { + C.atg_view_as(ptr, self, other) } -func AtgWhere1(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){ -C.atg_where1(ptr, condition, self, other) +func AtgWhere1(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor) { + C.atg_where1(ptr, condition, self, other) } -func AtgZero_(ptr *Ctensor, self Ctensor){ -C.atg_zero_(ptr, self) +func AtgZero_(ptr *Ctensor, self Ctensor) { + C.atg_zero_(ptr, self) } -func AtgZeros(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) -coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) -C.atg_zeros(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +func AtgZeros(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) + coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) + C.atg_zeros(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) } -func AtgZerosLike(ptr *Ctensor, self Ctensor){ -C.atg_zeros_like(ptr, self) +func AtgZerosLike(ptr *Ctensor, self Ctensor) { + C.atg_zeros_like(ptr, self) } -func AtgZerosOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ -csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) -csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) -C.atg_zeros_out(ptr, out, csizeDataPtr, csizeLen) +func AtgZerosOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { + csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) + csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) + C.atg_zeros_out(ptr, out, csizeDataPtr, csizeLen) } diff --git a/nn/batch-norm.go b/nn/batch-norm.go index 040e677..1b5d471 100644 --- a/nn/batch-norm.go +++ b/nn/batch-norm.go @@ -17,8 +17,8 @@ type BatchNormConfig struct { BsInit Init } -func DefaultBatchNormConfig() BatchNormConfig { - return BatchNormConfig{ +func DefaultBatchNormConfig() *BatchNormConfig { + return &BatchNormConfig{ CudnnEnable: true, Eps: 1e-5, Momentum: 0.1, @@ -29,17 +29,17 @@ func DefaultBatchNormConfig() BatchNormConfig { // A batch-normalization layer. 
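// ---- Editor's note (illustrative sketch, not part of this patch) ----
// The regenerated wrappers in libtch/c-generated.go above all marshal their
// arguments the same way: the address of a Go slice's first element stands in
// for the C array pointer, and lengths/flags are reinterpreted in place via
// unsafe.Pointer rather than converted. The small self-contained program
// below mirrors that idiom in plain Go (no cgo) so the pattern is visible in
// isolation; none of these names come from the library.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	outputSize := []int64{32, 32}
	outputSizeLen := len(outputSize)
	alignCorners := int32(1)

	// Like coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])).
	dataPtr := (*int64)(unsafe.Pointer(&outputSize[0]))
	// Like coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)):
	// the Go int is reread as a 32-bit value, relying on the platform's
	// (little-endian) layout exactly as the generated code does.
	cLen := *(*int32)(unsafe.Pointer(&outputSizeLen))
	// Like calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)).
	cAlign := *(*int32)(unsafe.Pointer(&alignCorners))

	fmt.Println(*dataPtr, cLen, cAlign) // 32 2 1 on a little-endian platform
}
// ---- end editor's note; the patch continues below ----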
type BatchNorm struct { - config BatchNormConfig - RunningMean ts.Tensor - RunningVar ts.Tensor - Ws ts.Tensor - Bs ts.Tensor + config *BatchNormConfig + RunningMean *ts.Tensor + RunningVar *ts.Tensor + Ws *ts.Tensor + Bs *ts.Tensor Nd uint } // NewBatchNorm creates a new BatchNorm layer -func NewBatchNorm(vs Path, nd uint, outDim int64, config BatchNormConfig) BatchNorm { - return BatchNorm{ +func NewBatchNorm(vs Path, nd uint, outDim int64, config *BatchNormConfig) *BatchNorm { + return &BatchNorm{ config: config, RunningMean: vs.ZerosNoTrain("running_mean", []int64{outDim}), RunningVar: vs.OnesNoTrain("running_var", []int64{outDim}), @@ -52,7 +52,7 @@ func NewBatchNorm(vs Path, nd uint, outDim int64, config BatchNormConfig) BatchN // // The input shape is assumed to be (N, C, L). Normalization // is performed over the first batch dimension N. -func BatchNorm1D(vs Path, outDim int64, config BatchNormConfig) BatchNorm { +func BatchNorm1D(vs Path, outDim int64, config *BatchNormConfig) *BatchNorm { return NewBatchNorm(vs, 1, outDim, config) } @@ -60,7 +60,7 @@ func BatchNorm1D(vs Path, outDim int64, config BatchNormConfig) BatchNorm { // // The input shape is assumed to be (N, C, H, W). Normalization // is performed over the first batch dimension N. -func BatchNorm2D(vs Path, outDim int64, config BatchNormConfig) BatchNorm { +func BatchNorm2D(vs Path, outDim int64, config *BatchNormConfig) *BatchNorm { return NewBatchNorm(vs, 2, outDim, config) } @@ -68,14 +68,14 @@ func BatchNorm2D(vs Path, outDim int64, config BatchNormConfig) BatchNorm { // // The input shape is assumed to be (N, C, D, H, W). Normalization // is performed over the first batch dimension N. -func BatchNorm3D(vs Path, outDim int64, config BatchNormConfig) BatchNorm { +func BatchNorm3D(vs Path, outDim int64, config *BatchNormConfig) *BatchNorm { return NewBatchNorm(vs, 3, outDim, config) } // Implement ModuleT interface for BatchNorm: // ========================================== -func (bn BatchNorm) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) { +func (bn *BatchNorm) ForwardT(xs *ts.Tensor, train bool) (retVal *ts.Tensor) { dim := xs.Dim() diff --git a/nn/conv-transpose.go b/nn/conv-transpose.go index 64e93b0..0fbbe02 100644 --- a/nn/conv-transpose.go +++ b/nn/conv-transpose.go @@ -42,8 +42,8 @@ type ConvTranspose3DConfig struct { } // DefaultConvConfig create a default 1D ConvConfig -func DefaultConvTranspose1DConfig() ConvTranspose1DConfig { - return ConvTranspose1DConfig{ +func DefaultConvTranspose1DConfig() *ConvTranspose1DConfig { + return &ConvTranspose1DConfig{ Stride: []int64{1}, Padding: []int64{0}, OutputPadding: []int64{0}, @@ -56,83 +56,107 @@ func DefaultConvTranspose1DConfig() ConvTranspose1DConfig { } type ConvTranspose1D struct { - Ws ts.Tensor - Bs ts.Tensor // optional - Config ConvTranspose1DConfig + Ws *ts.Tensor + Bs *ts.Tensor // optional + Config *ConvTranspose1DConfig } -func NewConvTranspose1D(vs *Path, inDim, outDim int64, ksizes []int64, cfg ConvTranspose1DConfig) ConvTranspose1D { +func NewConvTranspose1D(vs *Path, inDim, outDim int64, ksizes []int64, cfg *ConvTranspose1DConfig) *ConvTranspose1D { if len(ksizes) != 1 { log.Fatalf("NewConvTranspose1D method call: Kernel size should be 1. Got %v\n", len(ksizes)) } - var conv ConvTranspose1D - conv.Config = cfg - if cfg.Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) - } + var ( + ws *ts.Tensor + bs *ts.Tensor + ) + weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, ksizes...) 
- conv.Ws = vs.NewVar("weight", weightSize, cfg.WsInit) + ws = vs.NewVar("weight", weightSize, cfg.WsInit) - return conv + if cfg.Bias { + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) + } + + return &ConvTranspose1D{ + Ws: ws, + Bs: bs, + Config: cfg, + } } type ConvTranspose2D struct { - Ws ts.Tensor - Bs ts.Tensor // optional - Config ConvTranspose2DConfig + Ws *ts.Tensor + Bs *ts.Tensor // optional + Config *ConvTranspose2DConfig } -func NewConvTranspose2D(vs *Path, inDim, outDim int64, ksizes []int64, cfg ConvTranspose2DConfig) ConvTranspose2D { +func NewConvTranspose2D(vs *Path, inDim, outDim int64, ksizes []int64, cfg *ConvTranspose2DConfig) *ConvTranspose2D { if len(ksizes) != 2 { log.Fatalf("NewConvTranspose2D method call: Kernel size should be 2. Got %v\n", len(ksizes)) } - var conv ConvTranspose2D - conv.Config = cfg + + var ( + ws *ts.Tensor + bs *ts.Tensor + ) + if cfg.Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, ksizes...) - conv.Ws = vs.NewVar("weight", weightSize, cfg.WsInit) + ws = vs.NewVar("weight", weightSize, cfg.WsInit) - return conv + return &ConvTranspose2D{ + Ws: ws, + Bs: bs, + Config: cfg, + } } type ConvTranspose3D struct { - Ws ts.Tensor - Bs ts.Tensor // optional - Config ConvTranspose3DConfig + Ws *ts.Tensor + Bs *ts.Tensor // optional + Config *ConvTranspose3DConfig } -func NewConvTranspose3D(vs *Path, inDim, outDim int64, ksizes []int64, cfg ConvTranspose3DConfig) ConvTranspose3D { +func NewConvTranspose3D(vs *Path, inDim, outDim int64, ksizes []int64, cfg *ConvTranspose3DConfig) *ConvTranspose3D { if len(ksizes) != 3 { log.Fatalf("NewConvTranspose3D method call: Kernel size should be 3. Got %v\n", len(ksizes)) } - var conv ConvTranspose3D - conv.Config = cfg + + var ( + ws *ts.Tensor + bs *ts.Tensor + ) + if cfg.Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, ksizes...) 
- conv.Ws = vs.NewVar("weight", weightSize, cfg.WsInit) + ws = vs.NewVar("weight", weightSize, cfg.WsInit) - return conv + return &ConvTranspose3D{ + Ws: ws, + Bs: bs, + Config: cfg, + } } // Implement Module for Conv1D, Conv2D, Conv3D: // ============================================ -func (c ConvTranspose1D) Forward(xs ts.Tensor) ts.Tensor { +func (c *ConvTranspose1D) Forward(xs *ts.Tensor) *ts.Tensor { return ts.MustConvTranspose1d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Groups, c.Config.Dilation) } -func (c ConvTranspose2D) Forward(xs ts.Tensor) ts.Tensor { +func (c *ConvTranspose2D) Forward(xs *ts.Tensor) *ts.Tensor { return ts.MustConvTranspose2d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Groups, c.Config.Dilation) } -func (c ConvTranspose3D) Forward(xs ts.Tensor) ts.Tensor { +func (c *ConvTranspose3D) Forward(xs *ts.Tensor) *ts.Tensor { return ts.MustConvTranspose3d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Groups, c.Config.Dilation) } diff --git a/nn/conv.go b/nn/conv.go index 5e5d93e..9c0a103 100644 --- a/nn/conv.go +++ b/nn/conv.go @@ -40,8 +40,8 @@ type Conv3DConfig struct { } // DefaultConvConfig create a default 1D ConvConfig -func DefaultConv1DConfig() Conv1DConfig { - return Conv1DConfig{ +func DefaultConv1DConfig() *Conv1DConfig { + return &Conv1DConfig{ Stride: []int64{1}, Padding: []int64{0}, Dilation: []int64{1}, @@ -53,8 +53,8 @@ func DefaultConv1DConfig() Conv1DConfig { } // DefaultConvConfig2D creates a default 2D ConvConfig -func DefaultConv2DConfig() Conv2DConfig { - return Conv2DConfig{ +func DefaultConv2DConfig() *Conv2DConfig { + return &Conv2DConfig{ Stride: []int64{1, 1}, Padding: []int64{0, 0}, Dilation: []int64{1, 1}, @@ -66,60 +66,78 @@ func DefaultConv2DConfig() Conv2DConfig { } type Conv1D struct { - Ws ts.Tensor - Bs ts.Tensor // optional - Config Conv1DConfig + Ws *ts.Tensor + Bs *ts.Tensor // optional + Config *Conv1DConfig } -func NewConv1D(vs *Path, inDim, outDim, k int64, cfg Conv1DConfig) Conv1D { - var conv Conv1D - conv.Config = cfg +func NewConv1D(vs *Path, inDim, outDim, k int64, cfg *Conv1DConfig) *Conv1D { + var ( + ws *ts.Tensor + bs *ts.Tensor + ) if cfg.Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, k) - conv.Ws = vs.NewVar("weight", weightSize, cfg.WsInit) + ws = vs.NewVar("weight", weightSize, cfg.WsInit) - return conv + return &Conv1D{ + Ws: ws, + Bs: bs, + Config: cfg, + } } type Conv2D struct { - Ws ts.Tensor - Bs ts.Tensor // optional - Config Conv2DConfig + Ws *ts.Tensor + Bs *ts.Tensor // optional + Config *Conv2DConfig } -func NewConv2D(vs Path, inDim, outDim int64, k int64, cfg Conv2DConfig) Conv2D { - var conv Conv2D - conv.Config = cfg +func NewConv2D(vs Path, inDim, outDim int64, k int64, cfg *Conv2DConfig) *Conv2D { + var ( + ws *ts.Tensor + bs *ts.Tensor + ) if cfg.Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, k, k) - conv.Ws = vs.NewVar("weight", weightSize, cfg.WsInit) + ws = vs.NewVar("weight", weightSize, cfg.WsInit) - return conv + return &Conv2D{ + Ws: ws, + Bs: bs, + Config: cfg, + } } type Conv3D struct { - Ws ts.Tensor - Bs ts.Tensor // optional - Config Conv3DConfig + 
Ws *ts.Tensor + Bs *ts.Tensor // optional + Config *Conv3DConfig } -func NewConv3D(vs *Path, inDim, outDim, k int64, cfg Conv3DConfig) Conv3D { - var conv Conv3D - conv.Config = cfg +func NewConv3D(vs *Path, inDim, outDim, k int64, cfg *Conv3DConfig) *Conv3D { + var ( + ws *ts.Tensor + bs *ts.Tensor + ) if cfg.Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, k, k, k) - conv.Ws = vs.NewVar("weight", weightSize, cfg.WsInit) + ws = vs.NewVar("weight", weightSize, cfg.WsInit) - return conv + return &Conv3D{ + Ws: ws, + Bs: bs, + Config: cfg, + } } type Conv interface{} @@ -175,38 +193,51 @@ func buildConvConfig(ksizes []int64) interface{} { func NewConv(vs Path, inDim, outDim int64, ksizes []int64, config interface{}) Conv { configT := reflect.TypeOf(config) + var ( + ws *ts.Tensor + bs *ts.Tensor + ) switch { case len(ksizes) == 1 && configT.Name() == "Conv1DConfig": - var conv Conv1D - conv.Config = config.(Conv1DConfig) - if config.(Conv1DConfig).Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, config.(Conv1DConfig).BsInit) + cfg := config.(Conv1DConfig) + if cfg.Bias { + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } - weightSize := []int64{outDim, int64(inDim / config.(Conv1DConfig).Groups)} + weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, ksizes...) - conv.Ws = vs.NewVar("weight", weightSize, config.(Conv1DConfig).WsInit) - return conv + ws = vs.NewVar("weight", weightSize, cfg.WsInit) + return &Conv1D{ + Ws: ws, + Bs: bs, + Config: &cfg, + } case len(ksizes) == 2 && configT.Name() == "Conv2DConfig": - var conv Conv2D - conv.Config = config.(Conv2DConfig) - if config.(Conv2DConfig).Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, config.(Conv2DConfig).BsInit) + cfg := config.(Conv2DConfig) + if cfg.Bias { + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } - weightSize := []int64{outDim, int64(inDim / config.(Conv2DConfig).Groups)} + weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, ksizes...) - conv.Ws = vs.NewVar("weight", weightSize, config.(Conv2DConfig).WsInit) - return conv + ws = vs.NewVar("weight", weightSize, config.(Conv2DConfig).WsInit) + return &Conv2D{ + Ws: ws, + Bs: bs, + Config: &cfg, + } case len(ksizes) == 3 && configT.Name() == "Conv3DConfig": - var conv Conv3D - conv.Config = config.(Conv3DConfig) - if config.(Conv3DConfig).Bias { - conv.Bs = vs.NewVar("bias", []int64{outDim}, config.(Conv3DConfig).BsInit) + cfg := config.(Conv3DConfig) + if cfg.Bias { + bs = vs.NewVar("bias", []int64{outDim}, cfg.BsInit) } - weightSize := []int64{outDim, int64(inDim / config.(Conv3DConfig).Groups)} + weightSize := []int64{outDim, int64(inDim / cfg.Groups)} weightSize = append(weightSize, ksizes...) - conv.Ws = vs.NewVar("weight", weightSize, config.(Conv3DConfig).WsInit) - return conv + ws = vs.NewVar("weight", weightSize, cfg.WsInit) + return &Conv3D{ + Ws: ws, + Bs: bs, + Config: &cfg, + } default: err := fmt.Errorf("Expected nd length from 1 to 3. 
Got %v\n", len(ksizes)) panic(err) @@ -216,14 +247,14 @@ func NewConv(vs Path, inDim, outDim int64, ksizes []int64, config interface{}) C // Implement Module for Conv1D, Conv2D, Conv3D: // ============================================ -func (c Conv1D) Forward(xs ts.Tensor) ts.Tensor { +func (c *Conv1D) Forward(xs *ts.Tensor) *ts.Tensor { return ts.MustConv1d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups) } -func (c Conv2D) Forward(xs ts.Tensor) ts.Tensor { +func (c *Conv2D) Forward(xs *ts.Tensor) *ts.Tensor { return ts.MustConv2d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups) } -func (c Conv3D) Forward(xs ts.Tensor) ts.Tensor { +func (c *Conv3D) Forward(xs *ts.Tensor) *ts.Tensor { return ts.MustConv3d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups) } @@ -232,13 +263,13 @@ func (c Conv3D) Forward(xs ts.Tensor) ts.Tensor { // NOTE: `train` param won't be used, will be? -func (c Conv1D) ForwardT(xs ts.Tensor, train bool) ts.Tensor { +func (c *Conv1D) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor { return ts.MustConv1d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups) } -func (c Conv2D) ForwardT(xs ts.Tensor, train bool) ts.Tensor { +func (c *Conv2D) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor { return ts.MustConv2d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups) } -func (c Conv3D) ForwardT(xs ts.Tensor, train bool) ts.Tensor { +func (c *Conv3D) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor { return ts.MustConv3d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups) } diff --git a/nn/func.go b/nn/func.go index a95c693..ac9f8ea 100644 --- a/nn/func.go +++ b/nn/func.go @@ -7,36 +7,36 @@ import ( ) type Func struct { - f func(ts.Tensor) ts.Tensor + f func(*ts.Tensor) *ts.Tensor } -func NewFunc(fn func(ts.Tensor) ts.Tensor) (retVal Func) { +func NewFunc(fn func(*ts.Tensor) *ts.Tensor) (retVal Func) { return Func{f: fn} } // Implement Module interface for Func: // ==================================== -func (fn Func) Forward(xs ts.Tensor) (retVal ts.Tensor) { +func (fn Func) Forward(xs *ts.Tensor) (retVal *ts.Tensor) { return fn.f(xs) } // ForwardT implements ModuleT for Func object as well. // // NOTE: train param will not be used. 
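// ---- Editor's note (illustrative sketch, not part of this patch) ----
// With Func/FuncT now built from closures over *ts.Tensor, an activation
// "layer" can be written inline. MustRelu is assumed to be one of the
// auto-generated (del bool) tensor methods; any unary op would do, and the
// imports (nn "github.com/sugarme/gotch/nn", ts "github.com/sugarme/gotch/tensor")
// are implied.
func reluFuncExample(input *ts.Tensor) *ts.Tensor {
	relu := nn.NewFunc(func(xs *ts.Tensor) *ts.Tensor {
		return xs.MustRelu(false) // false: keep xs alive for the caller
	})
	return relu.Forward(input)
}
// ---- end editor's note; the patch continues below ----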
-func (fn Func) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) { +func (fn Func) ForwardT(xs *ts.Tensor, train bool) (retVal *ts.Tensor) { return fn.f(xs) } type FuncT struct { - f func(ts.Tensor, bool) ts.Tensor + f func(*ts.Tensor, bool) *ts.Tensor } -func NewFuncT(fn func(ts.Tensor, bool) ts.Tensor) (retVal FuncT) { +func NewFuncT(fn func(*ts.Tensor, bool) *ts.Tensor) (retVal FuncT) { return FuncT{f: fn} } // Implement Module interface for Func: // ==================================== -func (fn FuncT) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) { +func (fn FuncT) ForwardT(xs *ts.Tensor, train bool) (retVal *ts.Tensor) { return fn.f(xs, train) } diff --git a/nn/init.go b/nn/init.go index dcbd67f..3ac5629 100644 --- a/nn/init.go +++ b/nn/init.go @@ -11,10 +11,10 @@ import ( type Init interface { // creates a new tensor with specified initiation - InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) + InitTensor(dims []int64, device gotch.Device) (retVal *ts.Tensor) // re-initializes (in-place) an existing tensor with the specified initiation - Set(tensor ts.Tensor) + Set(tensor *ts.Tensor) } // constInit: @@ -28,7 +28,7 @@ func NewConstInit(v float64) constInit { return constInit{v} } -func (c constInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) { +func (c constInit) InitTensor(dims []int64, device gotch.Device) (retVal *ts.Tensor) { var err error kind := gotch.Float switch { @@ -50,7 +50,7 @@ func (c constInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tens return retVal } -func (c constInit) Set(tensor ts.Tensor) { +func (c constInit) Set(tensor *ts.Tensor) { var err error scalarVal := ts.FloatScalar(c.value) if err != nil { @@ -71,7 +71,7 @@ func NewRandnInit(mean, stdev float64) randnInit { return randnInit{mean, stdev} } -func (r randnInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) { +func (r randnInit) InitTensor(dims []int64, device gotch.Device) (retVal *ts.Tensor) { var err error rand.Seed(86) @@ -92,9 +92,9 @@ func (r randnInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tens } -func (r randnInit) Set(tensor ts.Tensor) { +func (r randnInit) Set(tensor *ts.Tensor) { var ( - randnTs ts.Tensor + randnTs *ts.Tensor err error ) @@ -128,7 +128,7 @@ func NewUniformInit(lo, up float64) uniformInit { return uniformInit{lo, up} } -func (u uniformInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) { +func (u uniformInit) InitTensor(dims []int64, device gotch.Device) (retVal *ts.Tensor) { var err error kind := gotch.Float retVal = ts.MustZeros(dims, kind, device) @@ -139,7 +139,7 @@ func (u uniformInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Te return retVal } -func (u uniformInit) Set(tensor ts.Tensor) { +func (u uniformInit) Set(tensor *ts.Tensor) { tensor.Uniform_(u.lo, u.up) } @@ -152,7 +152,7 @@ func NewKaimingUniformInit() kaimingUniformInit { return kaimingUniformInit{} } -func (k kaimingUniformInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) { +func (k kaimingUniformInit) InitTensor(dims []int64, device gotch.Device) (retVal *ts.Tensor) { var fanIn int64 if len(dims) == 0 { log.Fatalf("KaimingUniformInit method call: dims (%v) should have length >= 1", dims) @@ -191,7 +191,7 @@ func factorial(n int64) (result int64) { return 1 } -func (k kaimingUniformInit) Set(tensor ts.Tensor) { +func (k kaimingUniformInit) Set(tensor *ts.Tensor) { dims, err := tensor.Size() if err != nil { log.Fatalf("uniformInit - Set method call 
error: %v\n", err) @@ -218,12 +218,12 @@ func NewGlorotNInit() glorotNInit { return glorotNInit{} } -func (gl glorotNInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) { +func (gl glorotNInit) InitTensor(dims []int64, device gotch.Device) (retVal *ts.Tensor) { // TODO: implement return } -func (gl glorotNInit) Set(tensor ts.Tensor) { +func (gl glorotNInit) Set(tensor *ts.Tensor) { // TODO: implement } diff --git a/nn/layer-norm.go b/nn/layer-norm.go index 4589180..8c56de1 100644 --- a/nn/layer-norm.go +++ b/nn/layer-norm.go @@ -14,8 +14,8 @@ type LayerNormConfig struct { BsInit Init } -func DefaultLayerNormConfig() LayerNormConfig { - return LayerNormConfig{ +func DefaultLayerNormConfig() *LayerNormConfig { + return &LayerNormConfig{ CudnnEnable: true, Eps: 1e-5, ElementwiseAffine: true, @@ -26,30 +26,30 @@ func DefaultLayerNormConfig() LayerNormConfig { // A layer-normalization layer. type LayerNorm struct { - Config LayerNormConfig - Ws ts.Tensor // optional - Bs ts.Tensor // optional + Config *LayerNormConfig + Ws *ts.Tensor // optional + Bs *ts.Tensor // optional NormalizedShape []int64 } -func NewLayerNorm(vs Path, normalizedShape []int64, config LayerNormConfig) LayerNorm { +func NewLayerNorm(vs Path, normalizedShape []int64, config *LayerNormConfig) *LayerNorm { var ( - ws ts.Tensor - bs ts.Tensor + ws *ts.Tensor + bs *ts.Tensor ) if config.ElementwiseAffine { ws = vs.NewVar("weight", normalizedShape, config.WsInit) bs = vs.NewVar("bias", normalizedShape, config.BsInit) } - return LayerNorm{config, ws, bs, normalizedShape} + return &LayerNorm{config, ws, bs, normalizedShape} } // Implement Module interface for LayerNorm: // ========================================= -func (ln LayerNorm) Forward(xs ts.Tensor) (retVal ts.Tensor) { +func (ln *LayerNorm) Forward(xs *ts.Tensor) (retVal *ts.Tensor) { return ts.MustLayerNorm(xs, ln.NormalizedShape, ln.Ws, ln.Bs, ln.Config.Eps, ln.Config.CudnnEnable) } diff --git a/nn/linear.go b/nn/linear.go index 90e0b83..3150bf4 100644 --- a/nn/linear.go +++ b/nn/linear.go @@ -18,8 +18,8 @@ type LinearConfig struct { // DefaultLinearConfig creates default LinearConfig with // weights initiated using KaimingUniform and Bias is set to true -func DefaultLinearConfig() LinearConfig { - return LinearConfig{ +func DefaultLinearConfig() *LinearConfig { + return &LinearConfig{ WsInit: NewKaimingUniformInit(), BsInit: nil, Bias: true, @@ -28,8 +28,8 @@ func DefaultLinearConfig() LinearConfig { // Linear is a linear fully-connected layer type Linear struct { - Ws ts.Tensor - Bs ts.Tensor + Ws *ts.Tensor + Bs *ts.Tensor } // NewLinear creates a new linear layer @@ -37,9 +37,9 @@ type Linear struct { // inDim - input dimension (x) [input features - columns] // outDim - output dimension (y) [output features - columns] // NOTE: w will have shape{outDim, inDim}; b will have shape{outDim} -func NewLinear(vs Path, inDim, outDim int64, c LinearConfig) Linear { +func NewLinear(vs Path, inDim, outDim int64, c *LinearConfig) *Linear { - var bs ts.Tensor + var bs *ts.Tensor // bs has size of output dimension switch c.Bias { case false: @@ -55,7 +55,7 @@ func NewLinear(vs Path, inDim, outDim int64, c LinearConfig) Linear { } } - return Linear{ + return &Linear{ Ws: vs.NewVar("weight", []int64{outDim, inDim}, c.WsInit).MustT(false), Bs: bs, } @@ -89,7 +89,7 @@ func NewLinear(vs Path, inDim, outDim int64, c LinearConfig) Linear { // 1 1 1 // 1 1 1 // 1 1 1 ] -func (l Linear) Forward(xs ts.Tensor) (retVal ts.Tensor) { +func (l *Linear) Forward(xs *ts.Tensor) 
(retVal *ts.Tensor) { mul := xs.MustMatmul(l.Ws, false) return mul.MustAdd(l.Bs, true) @@ -98,7 +98,7 @@ func (l Linear) Forward(xs ts.Tensor) (retVal ts.Tensor) { // ForwardT implements ModuleT interface for Linear layer. // // NOTE: train param will not be used. -func (l Linear) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) { +func (l *Linear) ForwardT(xs *ts.Tensor, train bool) (retVal *ts.Tensor) { mul := xs.MustMatmul(l.Ws, false) return mul.MustAdd(l.Bs, true) diff --git a/nn/optimizer.go b/nn/optimizer.go index 27eb398..d2d85e6 100644 --- a/nn/optimizer.go +++ b/nn/optimizer.go @@ -10,7 +10,7 @@ import ( // Optimizer is a struct object to run gradient descent. type Optimizer struct { - opt ts.COptimizer + opt *ts.COptimizer // variables Variables // having embedded sync.Mutex variablesInOptimizer uint8 config interface{} @@ -18,7 +18,7 @@ type Optimizer struct { // OptimizerConfig defines Optimizer configurations. These configs can be used to build optimizer. type OptimizerConfig interface { - buildCOpt(lr float64) (retVal ts.COptimizer, err error) + buildCOpt(lr float64) (*ts.COptimizer, error) // Build builds an optimizer with the specified learning rate handling variables stored in `vs`. // @@ -29,11 +29,11 @@ type OptimizerConfig interface { // (config AdamOptimizerConfig) Build(vs VarStore, lr float64) (retVal Optimizer, err error){ // return defaultBuild(config, vs, lr) // } - Build(vs VarStore, lr float64) (retVal Optimizer, err error) + Build(vs *VarStore, lr float64) (*Optimizer, error) } // defaultBuild is `default` Build method for OptimizerConfig interface -func defaultBuild(config OptimizerConfig, vs VarStore, lr float64) (retVal Optimizer, err error) { +func defaultBuild(config OptimizerConfig, vs *VarStore, lr float64) (retVal *Optimizer, err error) { opt, err := config.buildCOpt(lr) if err != nil { @@ -43,7 +43,7 @@ func defaultBuild(config OptimizerConfig, vs VarStore, lr float64) (retVal Optim var parameters []ts.Tensor for _, v := range vs.Vars.TrainableVariables { param := v.MustShallowClone() - parameters = append(parameters, param) + parameters = append(parameters, *param) } if len(vs.Vars.TrainableVariables) > 0 { @@ -54,7 +54,7 @@ func defaultBuild(config OptimizerConfig, vs VarStore, lr float64) (retVal Optim // TODO: should we clone or copy? - return Optimizer{ + return &Optimizer{ opt: opt, // variables: vs.Vars, variablesInOptimizer: uint8(len(vs.Vars.TrainableVariables)), @@ -74,8 +74,8 @@ type SGDConfig struct { } // DefaultSGDConfig creates SGDConfig with default values. 
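// ---- Editor's note (illustrative sketch, not part of this patch) ----
// Typical use of the pointer-based configs converted in this file: build an
// optimizer from a config, then let BackwardStep zero the gradients, run the
// backward pass on the loss and apply one optimization step. `vs` is assumed
// to be a *VarStore and `loss` a scalar *ts.Tensor; the names are only for
// illustration.
func sgdStepExample(vs *nn.VarStore, loss *ts.Tensor) error {
	opt, err := nn.DefaultSGDConfig().Build(vs, 0.01)
	if err != nil {
		return err
	}
	opt.BackwardStep(loss)
	return nil
}
// ---- end editor's note; the patch continues below ----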
-func DefaultSGDConfig() SGDConfig { - return SGDConfig{ +func DefaultSGDConfig() *SGDConfig { + return &SGDConfig{ Momentum: 0.0, Dampening: 0.0, Wd: 0.0, @@ -84,8 +84,8 @@ func DefaultSGDConfig() SGDConfig { } // NewSGD creates the configuration for a SGD optimizer with specified values -func NewSGDConfig(momentum, dampening, wd float64, nesterov bool) (retVal SGDConfig) { - return SGDConfig{ +func NewSGDConfig(momentum, dampening, wd float64, nesterov bool) *SGDConfig { + return &SGDConfig{ Momentum: momentum, Dampening: dampening, Wd: wd, @@ -94,11 +94,11 @@ func NewSGDConfig(momentum, dampening, wd float64, nesterov bool) (retVal SGDCon } // Implement OptimizerConfig interface for SGDConfig -func (c SGDConfig) buildCOpt(lr float64) (retVal ts.COptimizer, err error) { +func (c *SGDConfig) buildCOpt(lr float64) (*ts.COptimizer, error) { return ts.Sgd(lr, c.Momentum, c.Dampening, c.Wd, c.Nesterov) } -func (c SGDConfig) Build(vs VarStore, lr float64) (retVal Optimizer, err error) { +func (c *SGDConfig) Build(vs *VarStore, lr float64) (*Optimizer, error) { return defaultBuild(c, vs, lr) } @@ -112,8 +112,8 @@ type AdamConfig struct { } // DefaultAdamConfig creates AdamConfig with default values -func DefaultAdamConfig() AdamConfig { - return AdamConfig{ +func DefaultAdamConfig() *AdamConfig { + return &AdamConfig{ Beta1: 0.9, Beta2: 0.999, Wd: 0.0, @@ -121,8 +121,8 @@ func DefaultAdamConfig() AdamConfig { } // NewAdamConfig creates AdamConfig with specified values -func NewAdamConfig(beta1, beta2, wd float64) AdamConfig { - return AdamConfig{ +func NewAdamConfig(beta1, beta2, wd float64) *AdamConfig { + return &AdamConfig{ Beta1: beta1, Beta2: beta2, Wd: wd, @@ -130,11 +130,11 @@ func NewAdamConfig(beta1, beta2, wd float64) AdamConfig { } // Implement OptimizerConfig interface for AdamConfig -func (c AdamConfig) buildCOpt(lr float64) (retVal ts.COptimizer, err error) { +func (c *AdamConfig) buildCOpt(lr float64) (*ts.COptimizer, error) { return ts.Adam(lr, c.Beta1, c.Beta2, c.Wd) } -func (c AdamConfig) Build(vs VarStore, lr float64) (retVal Optimizer, err error) { +func (c *AdamConfig) Build(vs *VarStore, lr float64) (*Optimizer, error) { return defaultBuild(c, vs, lr) } @@ -150,8 +150,8 @@ type RMSPropConfig struct { } // DefaultAdamConfig creates AdamConfig with default values -func DefaultRMSPropConfig() RMSPropConfig { - return RMSPropConfig{ +func DefaultRMSPropConfig() *RMSPropConfig { + return &RMSPropConfig{ Alpha: 0.99, Eps: 1e-8, Wd: 0.0, @@ -161,8 +161,8 @@ func DefaultRMSPropConfig() RMSPropConfig { } // NewRMSPropConfig creates RMSPropConfig with specified values -func NewRMSPropConfig(alpha, eps, wd, momentum float64, centered bool) RMSPropConfig { - return RMSPropConfig{ +func NewRMSPropConfig(alpha, eps, wd, momentum float64, centered bool) *RMSPropConfig { + return &RMSPropConfig{ Alpha: alpha, Eps: eps, Wd: wd, @@ -172,11 +172,11 @@ func NewRMSPropConfig(alpha, eps, wd, momentum float64, centered bool) RMSPropCo } // Implement OptimizerConfig interface for RMSPropConfig -func (c RMSPropConfig) buildCOpt(lr float64) (retVal ts.COptimizer, err error) { +func (c *RMSPropConfig) buildCOpt(lr float64) (*ts.COptimizer, error) { return ts.RmsProp(lr, c.Alpha, c.Eps, c.Wd, c.Momentum, c.Centered) } -func (c RMSPropConfig) Build(vs VarStore, lr float64) (retVal Optimizer, err error) { +func (c *RMSPropConfig) Build(vs *VarStore, lr float64) (*Optimizer, error) { return defaultBuild(c, vs, lr) } @@ -229,7 +229,7 @@ func (opt *Optimizer) Step() { } // BackwardStep applies a backward 
step pass, update the gradients, and performs an optimization step. -func (opt *Optimizer) BackwardStep(loss ts.Tensor) { +func (opt *Optimizer) BackwardStep(loss *ts.Tensor) { opt.addMissingVariables() @@ -250,7 +250,7 @@ func (opt *Optimizer) BackwardStep(loss ts.Tensor) { // BackwardStepClip applies a backward step pass, update the gradients, and performs an optimization step. // // The gradients are clipped based on `max` before being applied. -func (opt *Optimizer) BackwardStepClip(loss ts.Tensor, max float64) { +func (opt *Optimizer) BackwardStepClip(loss *ts.Tensor, max float64) { opt.addMissingVariables() err := opt.opt.ZeroGrad() diff --git a/nn/rnn.go b/nn/rnn.go index 7f3332f..1a4d875 100644 --- a/nn/rnn.go +++ b/nn/rnn.go @@ -15,33 +15,33 @@ type RNN interface { // Applies a single step of the recurrent network. // // The input should have dimensions [batch_size, features]. - Step(input ts.Tensor, inState State) State + Step(input *ts.Tensor, inState State) State // Applies multiple steps of the recurrent network. // // The input should have dimensions [batch_size, seq_len, features]. // The initial state is the result of applying zero_state. - Seq(input ts.Tensor) (ts.Tensor, State) + Seq(input *ts.Tensor) (*ts.Tensor, State) // Applies multiple steps of the recurrent network. // // The input should have dimensions [batch_size, seq_len, features]. - SeqInit(input ts.Tensor, inState State) (ts.Tensor, State) + SeqInit(input *ts.Tensor, inState State) (*ts.Tensor, State) } // The state for a LSTM network, this contains two tensors. type LSTMState struct { - Tensor1 ts.Tensor - Tensor2 ts.Tensor + Tensor1 *ts.Tensor + Tensor2 *ts.Tensor } // The hidden state vector, which is also the output of the LSTM. -func (ls LSTMState) H() (retVal ts.Tensor) { +func (ls *LSTMState) H() *ts.Tensor { return ls.Tensor1.MustShallowClone() } // The cell state vector. -func (ls LSTMState) C() (retVal ts.Tensor) { +func (ls *LSTMState) C() *ts.Tensor { return ls.Tensor2.MustShallowClone() } @@ -57,8 +57,8 @@ type RNNConfig struct { } // Default creates default RNN configuration -func DefaultRNNConfig() RNNConfig { - return RNNConfig{ +func DefaultRNNConfig() *RNNConfig { + return &RNNConfig{ HasBiases: true, NumLayers: 1, Dropout: float64(0.0), @@ -74,12 +74,12 @@ func DefaultRNNConfig() RNNConfig { type LSTM struct { flatWeights []ts.Tensor hiddenDim int64 - config RNNConfig + config *RNNConfig device gotch.Device } // NewLSTM creates a LSTM layer. 
-func NewLSTM(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal LSTM) { +func NewLSTM(vs *Path, inDim, hiddenDim int64, cfg *RNNConfig) *LSTM { var numDirections int64 = 1 if cfg.Bidirectional { @@ -100,7 +100,7 @@ func NewLSTM(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal LSTM) { bIh := vs.Zeros("b_ih", []int64{gateDim}) bHh := vs.Zeros("b_hh", []int64{gateDim}) - flatWeights = append(flatWeights, wIh, wHh, bIh, bHh) + flatWeights = append(flatWeights, *wIh, *wHh, *bIh, *bHh) } } @@ -112,7 +112,7 @@ func NewLSTM(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal LSTM) { ts.Must_CudnnRnnFlattenWeight(flatWeights, 4, inDim, 2, hiddenDim, cfg.NumLayers, cfg.BatchFirst, cfg.Bidirectional) } - return LSTM{ + return &LSTM{ flatWeights: flatWeights, hiddenDim: hiddenDim, config: cfg, @@ -124,7 +124,7 @@ func NewLSTM(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal LSTM) { // Implement RNN interface for LSTM: // ================================= -func (l LSTM) ZeroState(batchDim int64) (retVal State) { +func (l *LSTM) ZeroState(batchDim int64) (retVal State) { var numDirections int64 = 1 if l.config.Bidirectional { numDirections = 2 @@ -144,7 +144,7 @@ func (l LSTM) ZeroState(batchDim int64) (retVal State) { return retVal } -func (l LSTM) Step(input ts.Tensor, inState State) (retVal State) { +func (l *LSTM) Step(input *ts.Tensor, inState State) (retVal State) { ip := input.MustUnsqueeze(1, false) output, state := l.SeqInit(ip, inState) @@ -156,7 +156,7 @@ func (l LSTM) Step(input ts.Tensor, inState State) (retVal State) { return state } -func (l LSTM) Seq(input ts.Tensor) (output ts.Tensor, state State) { +func (l *LSTM) Seq(input *ts.Tensor) (output *ts.Tensor, state State) { batchDim := input.MustSize()[0] inState := l.ZeroState(batchDim) @@ -169,9 +169,9 @@ func (l LSTM) Seq(input ts.Tensor) (output ts.Tensor, state State) { return output, state } -func (l LSTM) SeqInit(input ts.Tensor, inState State) (ts.Tensor, State) { +func (l *LSTM) SeqInit(input *ts.Tensor, inState State) (*ts.Tensor, State) { - output, h, c := input.MustLstm([]ts.Tensor{inState.(LSTMState).Tensor1, inState.(LSTMState).Tensor2}, l.flatWeights, l.config.HasBiases, l.config.NumLayers, l.config.Dropout, l.config.Train, l.config.Bidirectional, l.config.BatchFirst) + output, h, c := input.MustLstm([]ts.Tensor{*inState.(LSTMState).Tensor1, *inState.(LSTMState).Tensor2}, l.flatWeights, l.config.HasBiases, l.config.NumLayers, l.config.Dropout, l.config.Train, l.config.Bidirectional, l.config.BatchFirst) return output, LSTMState{ Tensor1: h, @@ -181,10 +181,10 @@ func (l LSTM) SeqInit(input ts.Tensor, inState State) (ts.Tensor, State) { // GRUState is a GRU state. It contains a single tensor. 
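// ---- Editor's note (illustrative sketch, not part of this patch) ----
// Driving the converted LSTM API end to end. NewVarStore and Root are assumed
// to hand back the *VarStore/*Path shapes NewLSTM now expects (take an address
// if either still returns a value at this commit); the dimensions are
// arbitrary, and imports of gotch, nn and ts are implied.
func lstmExample() {
	vs := nn.NewVarStore(gotch.CPU)
	lstm := nn.NewLSTM(vs.Root(), 10, 20, nn.DefaultRNNConfig())

	// Input shape is [batch_size, seq_len, features], as documented on Seq.
	input := ts.MustRandn([]int64{8, 16, 10}, gotch.Float, gotch.CPU)
	output, state := lstm.Seq(input) // output *ts.Tensor, state nn.State (an LSTMState)
	_, _ = output, state
}
// ---- end editor's note; the patch continues below ----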
type GRUState struct { - Tensor ts.Tensor + Tensor *ts.Tensor } -func (gs GRUState) Value() ts.Tensor { +func (gs *GRUState) Value() *ts.Tensor { return gs.Tensor } @@ -194,12 +194,12 @@ func (gs GRUState) Value() ts.Tensor { type GRU struct { flatWeights []ts.Tensor hiddenDim int64 - config RNNConfig + config *RNNConfig device gotch.Device } // NewGRU create a new GRU layer -func NewGRU(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal GRU) { +func NewGRU(vs *Path, inDim, hiddenDim int64, cfg *RNNConfig) (retVal *GRU) { var numDirections int64 = 1 if cfg.Bidirectional { numDirections = 2 @@ -222,7 +222,7 @@ func NewGRU(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal GRU) { bIh := vs.Zeros("b_ih", []int64{gateDim}) bHh := vs.Zeros("b_hh", []int64{gateDim}) - flatWeights = append(flatWeights, wIh, wHh, bIh, bHh) + flatWeights = append(flatWeights, *wIh, *wHh, *bIh, *bHh) } } @@ -232,7 +232,7 @@ func NewGRU(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal GRU) { ts.Must_CudnnRnnFlattenWeight(flatWeights, 4, inDim, 3, hiddenDim, cfg.NumLayers, cfg.BatchFirst, cfg.Bidirectional) } - return GRU{ + return &GRU{ flatWeights: flatWeights, hiddenDim: hiddenDim, config: cfg, @@ -243,7 +243,7 @@ func NewGRU(vs Path, inDim, hiddenDim int64, cfg RNNConfig) (retVal GRU) { // Implement RNN interface for GRU: // ================================ -func (g GRU) ZeroState(batchDim int64) (retVal State) { +func (g *GRU) ZeroState(batchDim int64) (retVal State) { var numDirections int64 = 1 if g.config.Bidirectional { numDirections = 2 @@ -257,7 +257,7 @@ func (g GRU) ZeroState(batchDim int64) (retVal State) { return GRUState{Tensor: tensor} } -func (g GRU) Step(input ts.Tensor, inState State) (retVal State) { +func (g *GRU) Step(input *ts.Tensor, inState State) (retVal State) { unsqueezedInput := input.MustUnsqueeze(1, false) output, state := g.SeqInit(unsqueezedInput, inState) @@ -269,7 +269,7 @@ func (g GRU) Step(input ts.Tensor, inState State) (retVal State) { return state } -func (g GRU) Seq(input ts.Tensor) (output ts.Tensor, state State) { +func (g *GRU) Seq(input *ts.Tensor) (output *ts.Tensor, state State) { batchDim := input.MustSize()[0] inState := g.ZeroState(batchDim) @@ -281,7 +281,7 @@ func (g GRU) Seq(input ts.Tensor) (output ts.Tensor, state State) { return output, state } -func (g GRU) SeqInit(input ts.Tensor, inState State) (ts.Tensor, State) { +func (g *GRU) SeqInit(input *ts.Tensor, inState State) (*ts.Tensor, State) { output, h := input.MustGru(inState.(GRUState).Tensor, g.flatWeights, g.config.HasBiases, g.config.NumLayers, g.config.Dropout, g.config.Train, g.config.Bidirectional, g.config.BatchFirst) diff --git a/nn/rnn_test.go b/nn/rnn_test.go index 79323b1..762e022 100644 --- a/nn/rnn_test.go +++ b/nn/rnn_test.go @@ -10,7 +10,7 @@ import ( ts "github.com/sugarme/gotch/tensor" ) -func gruTest(rnnConfig nn.RNNConfig, t *testing.T) { +func gruTest(rnnConfig *nn.RNNConfig, t *testing.T) { var ( batchDim int64 = 5 @@ -47,7 +47,7 @@ func gruTest(rnnConfig nn.RNNConfig, t *testing.T) { input = ts.MustRandn([]int64{batchDim, seqLen, inputDim}, gotch.Float, gotch.CPU) output, _ = gru.Seq(input) wantSeq := []int64{batchDim, seqLen, outputDim * numDirections} - gotSeq := output.(ts.Tensor).MustSize() + gotSeq := output.(*ts.Tensor).MustSize() if !reflect.DeepEqual(wantSeq, gotSeq) { fmt.Println("Seq test:") @@ -75,7 +75,7 @@ func TestGRU(t *testing.T) { gruTest(cfg, t) } -func lstmTest(rnnConfig nn.RNNConfig, t *testing.T) { +func lstmTest(rnnConfig *nn.RNNConfig, t 
*testing.T) { var ( batchDim int64 = 5 @@ -121,7 +121,7 @@ func lstmTest(rnnConfig nn.RNNConfig, t *testing.T) { output, _ = lstm.Seq(input) wantSeq := []int64{batchDim, seqLen, outputDim * numDirections} - gotSeq := output.(ts.Tensor).MustSize() + gotSeq := output.(*ts.Tensor).MustSize() if !reflect.DeepEqual(wantSeq, gotSeq) { fmt.Println("Seq test:") diff --git a/nn/sequential.go b/nn/sequential.go index 446819e..80f9029 100644 --- a/nn/sequential.go +++ b/nn/sequential.go @@ -14,15 +14,15 @@ type Sequential struct { } // Seq creates a new empty sequential layer -func Seq() Sequential { - return Sequential{layers: make([]ts.Module, 0)} +func Seq() *Sequential { + return &Sequential{layers: make([]ts.Module, 0)} } // Sequential methods: //==================== // Len returns number of sub-layers embedded in this layer -func (s Sequential) Len() (retVal int64) { +func (s *Sequential) Len() (retVal int64) { return int64(len(s.layers)) } @@ -47,7 +47,7 @@ func (s *Sequential) AddFn(fn ts.Module) { } // ForwardAll applies the forward pass and returns the output for each layer. -func (s *Sequential) ForwardAll(xs ts.Tensor, opts ...uint8) (retVal []ts.Tensor) { +func (s *Sequential) ForwardAll(xs *ts.Tensor, opts ...uint8) (retVal []ts.Tensor) { var n uint8 = uint8(len(s.layers)) if len(opts) > 0 { @@ -55,11 +55,11 @@ func (s *Sequential) ForwardAll(xs ts.Tensor, opts ...uint8) (retVal []ts.Tensor } if s.IsEmpty() { - return []ts.Tensor{xs.MustShallowClone()} + return []ts.Tensor{*xs.MustShallowClone()} } for i := 0; i < int(n); i++ { - retVal = append(retVal, s.layers[i].Forward(xs)) + retVal = append(retVal, *s.layers[i].Forward(xs)) } return retVal @@ -76,7 +76,7 @@ func WithUint8(n uint8) func() uint8 { // ========================================== // Forward implements Module interface for Sequential -func (s *Sequential) Forward(xs ts.Tensor) (retVal ts.Tensor) { +func (s *Sequential) Forward(xs *ts.Tensor) (retVal *ts.Tensor) { if s.IsEmpty() { return xs.MustShallowClone() } @@ -85,12 +85,12 @@ func (s *Sequential) Forward(xs ts.Tensor) (retVal ts.Tensor) { outs := make([]ts.Tensor, len(s.layers)) for i := 0; i < len(s.layers); i++ { if i == 0 { - outs[0] = s.layers[i].Forward(xs) + outs[0] = *s.layers[i].Forward(xs) defer outs[0].MustDrop() } else if i == len(s.layers)-1 { - return s.layers[i].Forward(outs[i-1]) + return s.layers[i].Forward(&outs[i-1]) } else { - outs[i] = s.layers[i].Forward(outs[i-1]) + outs[i] = *s.layers[i].Forward(&outs[i-1]) defer outs[i].MustDrop() } } @@ -104,8 +104,8 @@ type SequentialT struct { } /// SeqT creates a new empty sequential layer. 
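The same pointer convention applies to Sequential: Seq returns *Sequential and Forward maps *ts.Tensor to *ts.Tensor. A small sketch; nn.ForwardWith is the adapter defined a bit further down in this file, while MustRelu is assumed to be one of the generated Must* wrappers (with the usual trailing del flag):

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch"
        "github.com/sugarme/gotch/nn"
        ts "github.com/sugarme/gotch/tensor"
    )

    func main() {
        seq := nn.Seq() // *Sequential

        // ForwardWith wraps a plain closure as a ts.Module.
        // MustRelu is assumed to exist among the generated wrappers.
        seq.AddFn(nn.ForwardWith(func(xs *ts.Tensor) *ts.Tensor {
            return xs.MustRelu(false)
        }))

        x := ts.MustRandn([]int64{4, 8}, gotch.Float, gotch.CPU)
        y := seq.Forward(x) // *ts.Tensor in, *ts.Tensor out
        fmt.Println(y.MustSize()) // [4 8]
    }
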
-func SeqT() SequentialT { - return SequentialT{ +func SeqT() *SequentialT { + return &SequentialT{ layers: make([]ts.ModuleT, 0), } } @@ -140,7 +140,7 @@ func (s *SequentialT) IsEmpty() (retVal bool) { * return currTs * } * */ -func (s SequentialT) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) { +func (s *SequentialT) ForwardT(xs *ts.Tensor, train bool) (retVal *ts.Tensor) { if s.IsEmpty() { return xs.MustShallowClone() } @@ -149,12 +149,12 @@ func (s SequentialT) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) { outs := make([]ts.Tensor, len(s.layers)) for i := 0; i < len(s.layers); i++ { if i == 0 { - outs[0] = s.layers[i].ForwardT(xs, train) + outs[0] = *s.layers[i].ForwardT(xs, train) defer outs[0].MustDrop() } else if i == len(s.layers)-1 { - return s.layers[i].ForwardT(outs[i-1], train) + return s.layers[i].ForwardT(&outs[i-1], train) } else { - outs[i] = s.layers[i].ForwardT(outs[i-1], train) + outs[i] = *s.layers[i].ForwardT(&outs[i-1], train) defer outs[i].MustDrop() } } @@ -187,7 +187,7 @@ func (s *SequentialT) AddFnT(fn ts.ModuleT) { } // ForwardAll applies the forward pass and returns the output for each layer. -func (s *SequentialT) ForwardAllT(xs ts.Tensor, train bool, opts ...uint8) (retVal []ts.Tensor) { +func (s *SequentialT) ForwardAllT(xs *ts.Tensor, train bool, opts ...uint8) (retVal []ts.Tensor) { var n uint8 = uint8(len(s.layers)) if len(opts) > 0 { @@ -195,13 +195,13 @@ func (s *SequentialT) ForwardAllT(xs ts.Tensor, train bool, opts ...uint8) (retV } if s.IsEmpty() { - return []ts.Tensor{xs.MustShallowClone()} + return []ts.Tensor{*xs.MustShallowClone()} } currTs := xs for i := 0; i < int(n); i++ { res := s.layers[i].ForwardT(currTs, train) - retVal = append(retVal, res) + retVal = append(retVal, *res) currTs = res } @@ -214,15 +214,15 @@ func (s *SequentialT) ForwardAllT(xs ts.Tensor, train bool, opts ...uint8) (retV // Ref. https://stackoverflow.com/a/42182987 // NOTE: Specifically, `ForwardWith` is used to wrap anonymous function // as input parameter of `AddFn` Sequential method. -type ForwardWith func(ts.Tensor) ts.Tensor +type ForwardWith func(*ts.Tensor) *ts.Tensor -func (fw ForwardWith) Forward(xs ts.Tensor) ts.Tensor { +func (fw ForwardWith) Forward(xs *ts.Tensor) *ts.Tensor { return fw(xs) } -type ForwardTWith func(ts.Tensor, bool) ts.Tensor +type ForwardTWith func(*ts.Tensor, bool) *ts.Tensor -func (fw ForwardTWith) ForwardT(xs ts.Tensor, train bool) ts.Tensor { +func (fw ForwardTWith) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor { return fw(xs, train) } @@ -235,7 +235,7 @@ func (fw ForwardTWith) ForwardT(xs ts.Tensor, train bool) ts.Tensor { // This seems not working in Go. // There 2 ways to get around. One is freeze VarStore, the other is // set manually set AutoGrad at `loss` tensor. I.e., `loss = loss.MustSetRequiresGrad(true)` -func BatchAccuracyForLogits(vs VarStore, m ts.ModuleT, xs, ys ts.Tensor, d gotch.Device, batchSize int) (retVal float64) { +func BatchAccuracyForLogits(vs *VarStore, m ts.ModuleT, xs, ys *ts.Tensor, d gotch.Device, batchSize int) (retVal float64) { var ( sumAccuracy float64 = 0.0 @@ -272,7 +272,7 @@ func BatchAccuracyForLogits(vs VarStore, m ts.ModuleT, xs, ys ts.Tensor, d gotch // BatchAccuracyForLogitIdx is an alternative of BatchAccuracyForLogits to // calculate accuracy for specified batch on module weight. 
It uses tensor // indexing instead of Iter2 -func BatchAccuracyForLogitsIdx(vs VarStore, m ts.ModuleT, xs, ys ts.Tensor, d gotch.Device, batchSize int) (retVal float64) { +func BatchAccuracyForLogitsIdx(vs *VarStore, m ts.ModuleT, xs, ys *ts.Tensor, d gotch.Device, batchSize int) (retVal float64) { var ( sumAccuracy float64 = 0.0 sampleCount float64 = 0.0 diff --git a/nn/sparse.go b/nn/sparse.go index 47dc733..f545b4e 100644 --- a/nn/sparse.go +++ b/nn/sparse.go @@ -14,8 +14,8 @@ type EmbeddingConfig struct { PaddingIdx int64 } -func DefaultEmbeddingConfig() EmbeddingConfig { - return EmbeddingConfig{ +func DefaultEmbeddingConfig() *EmbeddingConfig { + return &EmbeddingConfig{ Sparse: false, ScaleGradByFreq: false, WsInit: NewRandnInit(0.0, 1.0), @@ -28,13 +28,13 @@ func DefaultEmbeddingConfig() EmbeddingConfig { // An embedding layer acts as a simple lookup table that stores embeddings. // This is commonly used to store word embeddings. type Embedding struct { - Ws ts.Tensor - config EmbeddingConfig + Ws *ts.Tensor + config *EmbeddingConfig } // NewEmbedding creates a new Embedding -func NewEmbedding(vs Path, numEmbeddings int64, embeddingDim int64, config EmbeddingConfig) Embedding { - return Embedding{ +func NewEmbedding(vs *Path, numEmbeddings int64, embeddingDim int64, config *EmbeddingConfig) *Embedding { + return &Embedding{ Ws: vs.NewVar("weight", []int64{numEmbeddings, embeddingDim}, config.WsInit), config: config, } @@ -44,11 +44,11 @@ func NewEmbedding(vs Path, numEmbeddings int64, embeddingDim int64, config Embed // ========================================= // Forward implements Module interface for Embedding -func (e Embedding) Forward(xs ts.Tensor) (retVal ts.Tensor) { +func (e *Embedding) Forward(xs *ts.Tensor) *ts.Tensor { return ts.MustEmbedding(e.Ws, xs, e.config.PaddingIdx, e.config.ScaleGradByFreq, e.config.Sparse) } // ForwardT implements ModuleT interface for Embedding -func (e Embedding) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) { +func (e *Embedding) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor { return ts.MustEmbedding(e.Ws, xs, e.config.PaddingIdx, e.config.ScaleGradByFreq, e.config.Sparse) } diff --git a/nn/sparse_test.go b/nn/sparse_test.go index 58bd438..01943e8 100644 --- a/nn/sparse_test.go +++ b/nn/sparse_test.go @@ -9,7 +9,7 @@ import ( ts "github.com/sugarme/gotch/tensor" ) -func embeddingTest(embeddingConfig nn.EmbeddingConfig, t *testing.T) { +func embeddingTest(embeddingConfig *nn.EmbeddingConfig, t *testing.T) { var ( batchDim int64 = 5 diff --git a/nn/varstore.go b/nn/varstore.go index e93d40b..f9457e0 100644 --- a/nn/varstore.go +++ b/nn/varstore.go @@ -20,7 +20,7 @@ const SEP = "." // however the tensor is not set to require gradients. 
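Before the varstore.go changes, one more usage sketch for the Embedding layer from the sparse.go hunk above, using only signatures shown there (the vocabulary and embedding sizes are made up):

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch"
        "github.com/sugarme/gotch/nn"
        ts "github.com/sugarme/gotch/tensor"
    )

    func main() {
        vs := nn.NewVarStore(gotch.CPU)

        cfg := nn.DefaultEmbeddingConfig()              // *EmbeddingConfig
        emb := nn.NewEmbedding(vs.Root(), 100, 16, cfg) // *Embedding

        // Int64 indices in, embedded vectors out.
        idx := ts.MustOfSlice([]int64{1, 5, 42})
        out := emb.Forward(idx)
        fmt.Println(out.MustSize()) // [3 16]
    }
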
type Variables struct { mutex *sync.Mutex - NamedVariables map[string]ts.Tensor + NamedVariables map[string]*ts.Tensor TrainableVariables []ts.Tensor } @@ -45,14 +45,14 @@ type Entry struct { } // NewVarStore creates a new variable store located on the specified device -func NewVarStore(device gotch.Device) VarStore { +func NewVarStore(device gotch.Device) *VarStore { variables := Variables{ mutex: &sync.Mutex{}, - NamedVariables: make(map[string]ts.Tensor, 0), + NamedVariables: make(map[string]*ts.Tensor, 0), TrainableVariables: make([]ts.Tensor, 0), } - return VarStore{ + return &VarStore{ device: device, Vars: variables, } @@ -94,7 +94,7 @@ func (vs *VarStore) TrainableVariables() (retVal []ts.Tensor) { retVal = vs.Vars.TrainableVariables for _, t := range vs.Vars.TrainableVariables { - retVal = append(retVal, t.MustShallowClone()) + retVal = append(retVal, *t.MustShallowClone()) } return retVal @@ -108,7 +108,7 @@ func (vs *VarStore) Variables() (retVal map[string]ts.Tensor) { retVal = make(map[string]ts.Tensor, 0) for k, v := range vs.Vars.NamedVariables { - retVal[k] = v.MustShallowClone() + retVal[k] = *v.MustShallowClone() } return retVal @@ -119,8 +119,8 @@ func (vs *VarStore) Variables() (retVal map[string]ts.Tensor) { // NOTE: Variables are named and organized using paths. This function returns // the top level path for the var store and can be combined with '/' // to create sub-paths. -func (vs *VarStore) Root() (retVal Path) { - return Path{ +func (vs *VarStore) Root() *Path { + return &Path{ path: []string{}, varstore: vs, } @@ -130,7 +130,7 @@ func (vs *VarStore) Root() (retVal Path) { // // NOTE: Weight values for all the tensors currently stored in the // var-store gets saved in the given file. -func (vs *VarStore) Save(filepath string) (err error) { +func (vs *VarStore) Save(filepath string) error { vs.Vars.mutex.Lock() defer vs.Vars.mutex.Unlock() @@ -155,7 +155,7 @@ func (vs *VarStore) Save(filepath string) (err error) { // for these tensors are modified. // It will throw error if name of the loaded tensors can not find // in the current var-store named tensors set. 
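The save/load flow keeps the same shape with the new pointer types. The sketch below relies only on NewVarStore, Root, Randn, Save and Load as declared in this file; the file name is just an example:

    package main

    import (
        "log"

        "github.com/sugarme/gotch"
        "github.com/sugarme/gotch/nn"
    )

    func main() {
        vs := nn.NewVarStore(gotch.CPU)
        vs.Root().Randn("weight", []int64{3, 3}, 0.0, 1.0) // registers a trainable *ts.Tensor

        if err := vs.Save("model.gt"); err != nil {
            log.Fatal(err)
        }

        // Loading requires the target store to define the same variable names.
        vs2 := nn.NewVarStore(gotch.CPU)
        vs2.Root().Randn("weight", []int64{3, 3}, 0.0, 1.0)
        if err := vs2.Load("model.gt"); err != nil {
            log.Fatal(err)
        }
    }
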
-func (vs *VarStore) Load(filepath string) (err error) { +func (vs *VarStore) Load(filepath string) error { namedTensors, err := ts.LoadMultiWithDevice(filepath, vs.device) if err != nil { return err @@ -163,7 +163,7 @@ func (vs *VarStore) Load(filepath string) (err error) { var namedTensorsMap map[string]ts.Tensor = make(map[string]ts.Tensor, 0) for _, namedTensor := range namedTensors { - namedTensorsMap[namedTensor.Name] = namedTensor.Tensor + namedTensorsMap[namedTensor.Name] = *namedTensor.Tensor } // Match and in-place copy value (update) from newly loaded tensors @@ -190,7 +190,7 @@ func (vs *VarStore) Load(filepath string) (err error) { } ts.NoGrad(func() { - vs.Vars.NamedVariables[tsName].Copy_(currTs) + vs.Vars.NamedVariables[tsName].Copy_(&currTs) }) } return nil @@ -213,7 +213,7 @@ func (vs *VarStore) LoadPartial(filepath string) (retVal []string, err error) { return nil, err } - var namedTensorsMap map[string]ts.Tensor = make(map[string]ts.Tensor, 0) + var namedTensorsMap map[string]*ts.Tensor = make(map[string]*ts.Tensor, 0) for _, namedTensor := range namedTensors { namedTensorsMap[namedTensor.Name] = namedTensor.Tensor } @@ -226,7 +226,7 @@ func (vs *VarStore) LoadPartial(filepath string) (retVal []string, err error) { defer vs.Vars.mutex.Unlock() for tsName := range vs.Vars.NamedVariables { - var currTs ts.Tensor + var currTs *ts.Tensor var ok bool // missing variable @@ -320,7 +320,7 @@ func (vs *VarStore) Copy(src VarStore) (err error) { // ============= // Sub gets a sub-path of the given path. -func (p *Path) Sub(str string) (retVal Path) { +func (p *Path) Sub(str string) *Path { if strings.Contains(str, SEP) { log.Fatalf("Sub name cannot contain %v (%v)\n", SEP, str) @@ -328,7 +328,7 @@ func (p *Path) Sub(str string) (retVal Path) { path := p.path path = append(path, str) - return Path{ + return &Path{ path: path, varstore: p.varstore, } @@ -355,7 +355,7 @@ func (p *Path) getpath(name string) (retVal string) { } } -func (p *Path) add(name string, newTs ts.Tensor, trainable bool) (retVal ts.Tensor) { +func (p *Path) add(name string, newTs *ts.Tensor, trainable bool) (retVal *ts.Tensor) { path := p.getpath(name) p.varstore.Vars.mutex.Lock() @@ -366,7 +366,7 @@ func (p *Path) add(name string, newTs ts.Tensor, trainable bool) (retVal ts.Tens } var ( - tensor ts.Tensor + tensor *ts.Tensor err error ) if trainable { @@ -379,7 +379,7 @@ func (p *Path) add(name string, newTs ts.Tensor, trainable bool) (retVal ts.Tens } if trainable { - p.varstore.Vars.TrainableVariables = append(p.varstore.Vars.TrainableVariables, tensor) + p.varstore.Vars.TrainableVariables = append(p.varstore.Vars.TrainableVariables, *tensor) } p.varstore.Vars.NamedVariables[path] = tensor @@ -387,7 +387,7 @@ func (p *Path) add(name string, newTs ts.Tensor, trainable bool) (retVal ts.Tens return tensor } -func (p *Path) getOrAddWithLock(name string, tensor ts.Tensor, trainable bool, variables Variables) (retVal ts.Tensor) { +func (p *Path) getOrAddWithLock(name string, tensor *ts.Tensor, trainable bool, variables Variables) (retVal *ts.Tensor) { path := p.getpath(name) // if found, return it @@ -397,7 +397,7 @@ func (p *Path) getOrAddWithLock(name string, tensor ts.Tensor, trainable bool, v // not found, add it var err error - var ttensor ts.Tensor + var ttensor *ts.Tensor if trainable { ttensor, err = tensor.SetRequiresGrad(true, false) if err != nil { @@ -408,7 +408,7 @@ func (p *Path) getOrAddWithLock(name string, tensor ts.Tensor, trainable bool, v } if trainable { - variables.TrainableVariables = 
append(variables.TrainableVariables, ttensor) + variables.TrainableVariables = append(variables.TrainableVariables, *ttensor) } variables.NamedVariables[path] = ttensor @@ -422,7 +422,7 @@ func (p *Path) getOrAddWithLock(name string, tensor ts.Tensor, trainable bool, v // has the specified shape. The variable will not be trainable so // gradients will not be tracked. // The variable uses a float tensor initialized with zeros. -func (p *Path) ZerosNoTrain(name string, dims []int64) (retVal ts.Tensor) { +func (p *Path) ZerosNoTrain(name string, dims []int64) (retVal *ts.Tensor) { device := p.Device() z, err := ts.Zeros(dims, gotch.Float, device) @@ -439,7 +439,7 @@ func (p *Path) ZerosNoTrain(name string, dims []int64) (retVal ts.Tensor) { // has the specified shape. The variable will not be trainable so // gradients will not be tracked. // The variable uses a float tensor initialized with ones. -func (p *Path) OnesNoTrain(name string, dims []int64) (retVal ts.Tensor) { +func (p *Path) OnesNoTrain(name string, dims []int64) (retVal *ts.Tensor) { device := p.Device() z, err := ts.Ones(dims, gotch.Float, device) @@ -457,7 +457,7 @@ func (p *Path) OnesNoTrain(name string, dims []int64) (retVal ts.Tensor) { // will be tracked. // The variable uses a float tensor initialized as per the // related argument. -func (p *Path) NewVar(name string, dims []int64, ini Init) (retVal ts.Tensor) { +func (p *Path) NewVar(name string, dims []int64, ini Init) (retVal *ts.Tensor) { v := ini.InitTensor(dims, p.varstore.device) @@ -470,7 +470,7 @@ func (p *Path) NewVar(name string, dims []int64, ini Init) (retVal ts.Tensor) { // has the specified shape. The variable is trainable, its gradient // will be tracked. // The variable uses a float tensor initialized with zeros. -func (p *Path) Zeros(name string, dims []int64) (retVal ts.Tensor) { +func (p *Path) Zeros(name string, dims []int64) (retVal *ts.Tensor) { return p.NewVar(name, dims, NewConstInit(0.0)) } @@ -481,7 +481,7 @@ func (p *Path) Zeros(name string, dims []int64) (retVal ts.Tensor) { // has the specified shape. The variable is trainable, its gradient // will be tracked. // The variable uses a float tensor initialized with ones. -func (p *Path) Ones(name string, dims []int64) (retVal ts.Tensor) { +func (p *Path) Ones(name string, dims []int64) (retVal *ts.Tensor) { return p.NewVar(name, dims, NewConstInit(1.0)) } @@ -493,7 +493,7 @@ func (p *Path) Ones(name string, dims []int64) (retVal ts.Tensor) { // will be tracked. // The variable uses a float tensor initialized randomly using a // standard normal distribution. -func (p *Path) RandnStandard(name string, dims []int64) (retVal ts.Tensor) { +func (p *Path) RandnStandard(name string, dims []int64) (retVal *ts.Tensor) { return p.NewVar(name, dims, NewRandnInit(0.0, 1.0)) } @@ -505,7 +505,7 @@ func (p *Path) RandnStandard(name string, dims []int64) (retVal ts.Tensor) { // will be tracked. // The variable uses a float tensor initialized randomly using a // normal distribution with the specified mean and standard deviation. -func (p *Path) Randn(name string, dims []int64, mean float64, stdev float64) (retVal ts.Tensor) { +func (p *Path) Randn(name string, dims []int64, mean float64, stdev float64) (retVal *ts.Tensor) { return p.NewVar(name, dims, NewRandnInit(mean, stdev)) } @@ -517,7 +517,7 @@ func (p *Path) Randn(name string, dims []int64, mean float64, stdev float64) (re // will be tracked. 
// The variable uses a float tensor initialized randomly using a // uniform distribution between the specified bounds. -func (p *Path) Uniform(name string, dims []int64, lo, up float64) (retVal ts.Tensor) { +func (p *Path) Uniform(name string, dims []int64, lo, up float64) (retVal *ts.Tensor) { return p.NewVar(name, dims, NewUniformInit(lo, up)) } @@ -529,7 +529,7 @@ func (p *Path) Uniform(name string, dims []int64, lo, up float64) (retVal ts.Ten // will be tracked. // The variable uses a float tensor initialized randomly using a // uniform distribution which bounds follow Kaiming initialization. -func (p *Path) KaimingUniform(name string, dims []int64) (retVal ts.Tensor) { +func (p *Path) KaimingUniform(name string, dims []int64) (retVal *ts.Tensor) { return p.NewVar(name, dims, NewKaimingUniformInit()) } @@ -541,7 +541,7 @@ func (p *Path) KaimingUniform(name string, dims []int64) (retVal ts.Tensor) { // will be tracked. // The variable uses a float tensor initialized by copying some // given tensor. -func (p *Path) VarCopy(name string, t ts.Tensor) (retVal ts.Tensor) { +func (p *Path) VarCopy(name string, t *ts.Tensor) (retVal *ts.Tensor) { size, err := t.Size() if err != nil { @@ -557,7 +557,7 @@ func (p *Path) VarCopy(name string, t ts.Tensor) (retVal ts.Tensor) { } // Get gets the tensor corresponding to a given name if present. -func (p *Path) Get(name string) (retVal ts.Tensor, err error) { +func (p *Path) Get(name string) (retVal *ts.Tensor, err error) { p.varstore.Vars.mutex.Lock() defer p.varstore.Vars.mutex.Unlock() @@ -572,11 +572,11 @@ func (p *Path) Get(name string) (retVal ts.Tensor, err error) { } // Entry gets the entry corresponding to a given name for in-place manipulation. -func (p *Path) Entry(name string) (retVal Entry) { +func (p *Path) Entry(name string) *Entry { p.varstore.Vars.mutex.Lock() defer p.varstore.Vars.mutex.Unlock() - return Entry{ + return &Entry{ name: name, variables: &p.varstore.Vars, path: p, @@ -592,14 +592,14 @@ func (p *Path) Entry(name string) (retVal Entry) { // var store, the corresponding tensor is returned. Otherwise a new // variable is added to the var-store with the entry name and is // initialized according to the init parameter. -func (e *Entry) OrVar(dims []int64, init Init) (retVal ts.Tensor) { +func (e *Entry) OrVar(dims []int64, init Init) (retVal *ts.Tensor) { v := init.InitTensor(dims, e.path.varstore.device) return e.path.getOrAddWithLock(e.name, v, true, *e.variables) } // Returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrVarCopy(tensor ts.Tensor) (retVal ts.Tensor) { +func (e *Entry) OrVarCopy(tensor *ts.Tensor) (retVal *ts.Tensor) { size, err := tensor.Size() if err != nil { @@ -615,50 +615,50 @@ func (e *Entry) OrVarCopy(tensor ts.Tensor) (retVal ts.Tensor) { } // Returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrKaimingUniform(dims []int64) (retVal ts.Tensor) { +func (e *Entry) OrKaimingUniform(dims []int64) (retVal *ts.Tensor) { return e.OrVar(dims, NewKaimingUniformInit()) } // OrOnes returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrOnes(dims []int64) (retVal ts.Tensor) { +func (e *Entry) OrOnes(dims []int64) (retVal *ts.Tensor) { return e.OrVar(dims, NewConstInit(1.0)) } // OrOnesNoTrain returns the existing entry if, otherwise create a new variable. 
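As a quick reference for how the Path and Entry helpers compose now that they all hand back *ts.Tensor (the names and shapes below are made up):

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch"
        "github.com/sugarme/gotch/nn"
    )

    func main() {
        vs := nn.NewVarStore(gotch.CPU)
        enc := vs.Root().Sub("encoder") // variables get dotted names like "encoder.weight"

        w := enc.KaimingUniform("weight", []int64{64, 32})
        b := enc.Zeros("bias", []int64{64})

        // Entry returns the existing variable or creates it on first use.
        g := enc.Entry("gain").OrOnes([]int64{64})

        fmt.Println(w.MustSize(), b.MustSize(), g.MustSize())
    }
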
-func (e *Entry) OrOnesNoTrain(dims []int64) (retVal ts.Tensor) { +func (e *Entry) OrOnesNoTrain(dims []int64) (retVal *ts.Tensor) { o := ts.MustOnes(dims, gotch.Float, e.path.Device()) return e.path.getOrAddWithLock(e.name, o, true, *e.variables) } // OrRandn returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrRandn(dims []int64, mean, stdev float64) (retVal ts.Tensor) { +func (e *Entry) OrRandn(dims []int64, mean, stdev float64) (retVal *ts.Tensor) { return e.OrVar(dims, NewRandnInit(mean, stdev)) } // OrRandnStandard returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrRandnStandard(dims []int64) (retVal ts.Tensor) { +func (e *Entry) OrRandnStandard(dims []int64) (retVal *ts.Tensor) { return e.OrVar(dims, NewRandnInit(0.0, 1.0)) } // OrUniform returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrUniform(dims []int64, lo, up float64) (retVal ts.Tensor) { +func (e *Entry) OrUniform(dims []int64, lo, up float64) (retVal *ts.Tensor) { return e.OrVar(dims, NewUniformInit(lo, up)) } // OrZeros returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrZeros(dims []int64) (retVal ts.Tensor) { +func (e *Entry) OrZeros(dims []int64) (retVal *ts.Tensor) { return e.OrVar(dims, NewConstInit(0.0)) } // OrZerosNoTrain returns the existing entry if, otherwise create a new variable. -func (e *Entry) OrZerosNoTrain(dims []int64) (retVal ts.Tensor) { +func (e *Entry) OrZerosNoTrain(dims []int64) (retVal *ts.Tensor) { z := ts.MustZeros(dims, gotch.Float, e.path.Device()) return e.path.getOrAddWithLock(e.name, z, true, *e.variables) diff --git a/nn/varstore_test.go b/nn/varstore_test.go index 64c8825..3ed2775 100644 --- a/nn/varstore_test.go +++ b/nn/varstore_test.go @@ -46,7 +46,7 @@ func TestSaveLoad(t *testing.T) { panic(err) } - add := func(vs nn.Path) (ts.Tensor, ts.Tensor) { + add := func(vs *nn.Path) (*ts.Tensor, *ts.Tensor) { subA := vs.Sub("a") subB := subA.Sub("b") v := subB.Ones("t2", []int64{3}) diff --git a/tensor/data.go b/tensor/data.go index 99ff498..4156afd 100644 --- a/tensor/data.go +++ b/tensor/data.go @@ -16,8 +16,8 @@ import ( // containing a (potentially random) slice of each of the two input // tensors. type Iter2 struct { - xs Tensor - ys Tensor + xs *Tensor + ys *Tensor batchIndex int64 batchSize int64 totalSize int64 @@ -38,12 +38,16 @@ type Iter2 struct { // * `xs` - the features to be used by the model. // * `ys` - the targets that the model attempts to predict. // * `batch_size` - the size of batches to be returned. -func NewIter2(xs, ys Tensor, batchSize int64) (retVal Iter2, err error) { +func NewIter2(xs, ys *Tensor, batchSize int64) (*Iter2, error) { + var ( + iter *Iter2 + err error + ) totalSize := xs.MustSize()[0] if ys.MustSize()[0] != totalSize { err = fmt.Errorf("Different dimension for the two inputs: %v - %v", xs.MustSize(), ys.MustSize()) - return retVal, err + return nil, err } // xsClone, err := xs.ZerosLike(false) @@ -58,7 +62,7 @@ func NewIter2(xs, ys Tensor, batchSize int64) (retVal Iter2, err error) { // } // ysClone.Copy_(ys) - retVal = Iter2{ + iter = &Iter2{ xs: xs.MustShallowClone(), ys: ys.MustShallowClone(), // xs: xsClone, @@ -69,7 +73,7 @@ func NewIter2(xs, ys Tensor, batchSize int64) (retVal Iter2, err error) { returnSmallLastBatch: false, } - return retVal, nil + return iter, nil } // MustNewIter2 returns a new iterator. 
@@ -84,14 +88,14 @@ func NewIter2(xs, ys Tensor, batchSize int64) (retVal Iter2, err error) { // * `xs` - the features to be used by the model. // * `ys` - the targets that the model attempts to predict. // * `batch_size` - the size of batches to be returned. -func MustNewIter2(xs, ys Tensor, batchSize int64) (retVal Iter2) { - retVal, err := NewIter2(xs, ys, batchSize) +func MustNewIter2(xs, ys *Tensor, batchSize int64) *Iter2 { + iter, err := NewIter2(xs, ys, batchSize) if err != nil { log.Fatal(err) } - return retVal + return iter } // Shuffle shuffles the dataset. @@ -108,20 +112,20 @@ func (it *Iter2) Shuffle() { } // ToDevice transfers the mini-batches to a specified device. -func (it Iter2) ToDevice(device gotch.Device) (retVal Iter2) { +func (it *Iter2) ToDevice(device gotch.Device) *Iter2 { it.device = device return it } // ReturnSmallLastBatch when set, returns the last batch even if smaller than the batch size. -func (it Iter2) ReturnSmallLastBatch() (retVal Iter2) { +func (it *Iter2) ReturnSmallLastBatch() *Iter2 { it.returnSmallLastBatch = true return it } type Iter2Item struct { - Data Tensor - Label Tensor + Data *Tensor + Label *Tensor } // Next implements iterator for Iter2 @@ -148,7 +152,7 @@ func (it *Iter2) Next() (item Iter2Item, ok bool) { } } -func (it Iter2) Drop() { +func (it *Iter2) Drop() { it.xs.MustDrop() it.ys.MustDrop() } @@ -156,17 +160,17 @@ func (it Iter2) Drop() { // TextData represent text data in tensor of runes (uint8) // and its corresponding string type TextData struct { - Data Tensor // frequency (occurence) of byte value from input text - CharForLabel []rune // unique rune values from input text + Data *Tensor // frequency (occurence) of byte value from input text + CharForLabel []rune // unique rune values from input text } // TextDataIter is a text data interator type TextDataIter struct { - Data Tensor + Data *Tensor SeqLen int64 BatchIndex int64 BatchSize int64 - Indexes Tensor + Indexes *Tensor IndexesLen int64 } @@ -179,17 +183,17 @@ type TextDataIter struct { // will labelled with new label(index) // Data: tensor of labels // CharForLabel: []rune (unique runes from text input) -func NewTextData(filename string) (retVal TextData, err error) { +func NewTextData(filename string) (*TextData, error) { filePath, err := filepath.Abs(filename) if err != nil { - return retVal, err + return nil, err } r, err := os.Open(filePath) buffer, err := ioutil.ReadAll(r) if err != nil { - return retVal, err + return nil, err } var labelForChar map[byte]uint8 = make(map[byte]uint8, 0) @@ -216,35 +220,35 @@ func NewTextData(filename string) (retVal TextData, err error) { data := MustOfSlice(dataIndexes) - return TextData{ + return &TextData{ Data: data, CharForLabel: charForLabel, }, nil } // Labels returns the number of different `character` (rune) used by the dataset. -func (td TextData) Labels() (retVal int64) { +func (td *TextData) Labels() (retVal int64) { return int64(len(td.CharForLabel)) } // Data returns a shallow copy of the data. -func (td TextData) CloneData() (retVal Tensor) { +func (td *TextData) CloneData() *Tensor { return td.Data.MustShallowClone() } // LabelForChar returns a corresponding `char` (rune) for // specified label input -func (td TextData) LabelForChar(label int64) (retVal rune) { +func (td *TextData) LabelForChar(label int64) rune { return td.CharForLabel[int(label)] } // IterShuffle returns a batch iterator over the dataset. // Each sample is made of seq_len characters. 
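To see the new data.go types in action: MustNewIter2 hands back *Iter2 and each item carries *Tensor fields, so the usual mini-batch loop looks like the sketch below (shapes and batch size are illustrative):

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch"
        ts "github.com/sugarme/gotch/tensor"
    )

    func main() {
        xs := ts.MustRandn([]int64{100, 4}, gotch.Float, gotch.CPU)
        ys := ts.MustRandn([]int64{100, 1}, gotch.Float, gotch.CPU)

        it := ts.MustNewIter2(xs, ys, 32) // *Iter2
        it.Shuffle()

        for item, ok := it.Next(); ok; item, ok = it.Next() {
            // item.Data and item.Label are *ts.Tensor now.
            fmt.Println(item.Data.MustSize(), item.Label.MustSize())
            item.Data.MustDrop()
            item.Label.MustDrop()
        }
        it.Drop()
    }
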
-func (td TextData) IterShuffle(seqLen int64, batchSize int64) (retVal TextDataIter) { +func (td *TextData) IterShuffle(seqLen int64, batchSize int64) *TextDataIter { indexesLen := td.Data.MustSize()[0] - seqLen + 1 - return TextDataIter{ + return &TextDataIter{ Data: td.Data.MustShallowClone(), SeqLen: seqLen, BatchIndex: 0, @@ -255,12 +259,12 @@ func (td TextData) IterShuffle(seqLen int64, batchSize int64) (retVal TextDataIt } // Next implements iterator for TextDataIter -func (tdi *TextDataIter) Next() (retVal Tensor, ok bool) { +func (tdi *TextDataIter) Next() (*Tensor, bool) { start := tdi.BatchIndex * tdi.BatchSize size := min(tdi.BatchSize, tdi.IndexesLen-start) if size < tdi.BatchSize { - return retVal, false + return nil, false } tdi.BatchIndex += 1 @@ -276,10 +280,10 @@ func (tdi *TextDataIter) Next() (retVal Tensor, ok bool) { for _, idx := range indexes { narrowIdx := NewNarrow(idx, idx+tdi.SeqLen) idxTs := tdi.Data.Idx(narrowIdx) - batch = append(batch, idxTs) + batch = append(batch, *idxTs) } - retVal = MustStack(batch, 0) + retVal := MustStack(batch, 0) // Delete intermediate tensors for _, xs := range batch { @@ -289,7 +293,7 @@ func (tdi *TextDataIter) Next() (retVal Tensor, ok bool) { return retVal, true } -func min(v1, v2 int64) (retVal int64) { +func min(v1, v2 int64) int64 { if v1 < v2 { return v1 } diff --git a/tensor/image.go b/tensor/image.go index 839fc55..e78e375 100644 --- a/tensor/image.go +++ b/tensor/image.go @@ -9,22 +9,20 @@ import ( ) // LoadHwc returns a tensor of shape [height, width, channels] on success. -func LoadHwc(path string) (retVal Tensor, err error) { +func LoadHwc(path string) (*Tensor, error) { ctensor := lib.AtLoadImage(path) - err = TorchErr() + err := TorchErr() if err != nil { - return retVal, err + return nil, err } - retVal = Tensor{ctensor} - - return retVal, nil + return &Tensor{ctensor}, nil } // SaveHwc save an image from tensor. It expects a tensor of shape [height, // width, channels] -func SaveHwc(ts Tensor, path string) (err error) { +func SaveHwc(ts *Tensor, path string) error { lib.AtSaveImage(ts.ctensor, path) return TorchErr() @@ -32,14 +30,13 @@ func SaveHwc(ts Tensor, path string) (err error) { // ResizeHwc expects a tensor of shape [height, width, channels]. // On success returns a tensor of shape [height, width, channels]. -func ResizeHwc(ts Tensor, outWidth, outHeight int64) (retVal Tensor, err error) { +func ResizeHwc(ts *Tensor, outWidth, outHeight int64) (*Tensor, error) { ctensor := lib.AtResizeImage(ts.ctensor, outWidth, outHeight) - err = TorchErr() + err := TorchErr() if err != nil { - return retVal, err + return nil, err } - retVal = Tensor{ctensor} - return retVal, nil + return &Tensor{ctensor}, nil } diff --git a/tensor/index.go b/tensor/index.go index 6cfce4c..9c0aaee 100644 --- a/tensor/index.go +++ b/tensor/index.go @@ -79,7 +79,7 @@ type Narrow struct { Start int64 End int64 } -type IndexSelect struct{ Index Tensor } +type IndexSelect struct{ Index *Tensor } type InsertNewAxis struct{} // NewSelect creates an tensor indexer with given index. 
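The indexer structs in index.go stay value types; only the tensors flowing through Idx become pointers. A short sketch of single and combined indexing; the expected shapes in the comments assume the 6x4 input below:

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch"
        ts "github.com/sugarme/gotch/tensor"
    )

    func main() {
        x := ts.MustRandn([]int64{6, 4}, gotch.Float, gotch.CPU)

        // Narrow rows 1..3 (half-open) on the first dimension.
        rows := x.Idx(ts.NewNarrow(1, 3))
        fmt.Println(rows.MustSize()) // [2 4]

        // Keep all rows, select columns 0 and 2 on the second dimension.
        cols := ts.MustOfSlice([]int64{0, 2})
        sel := x.Idx([]ts.TensorIndexer{ts.NewNarrow(0, 6), ts.NewIndexSelect(cols)})
        fmt.Println(sel.MustSize()) // [6 2]
    }
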
@@ -93,7 +93,7 @@ func NewNarrow(start, end int64) Narrow { return Narrow{Start: start, End: end} } -func NewIndexSelect(ts Tensor) IndexSelect { +func NewIndexSelect(ts *Tensor) IndexSelect { return IndexSelect{Index: ts} } @@ -130,7 +130,7 @@ type IndexOp interface { // // NOTE: // - `index`: expects type `TensorIndexer` or `[]TensorIndexer` -func (ts *Tensor) Idx(index interface{}) (retVal Tensor) { +func (ts *Tensor) Idx(index interface{}) (retVal *Tensor) { // indexTyp := reflect.TypeOf(index) indexVal := reflect.ValueOf(index) @@ -196,7 +196,7 @@ func (ts *Tensor) Idx(index interface{}) (retVal Tensor) { // Tensor Methods: // =============== -func (ts Tensor) indexer(indexSpec []TensorIndexer) (retVal Tensor, err error) { +func (ts *Tensor) indexer(indexSpec []TensorIndexer) (retVal *Tensor, err error) { // Make sure number of non-newaxis is not exceed number of dimensions var numNewAxis int = 0 @@ -221,7 +221,7 @@ func (ts Tensor) indexer(indexSpec []TensorIndexer) (retVal Tensor, err error) { // If `spec` is `IndexSelect` type and if reflect.TypeOf(spec).Name() == "IndexSelect" { if reflect.ValueOf(spec).Kind() == reflect.Struct { - inputTensor := reflect.ValueOf(spec).FieldByName("Index").Interface().(Tensor) + inputTensor := reflect.ValueOf(spec).FieldByName("Index").Interface().(*Tensor) // 1. Either its input tensor has dimension > 1, throw error. inputTensorShape, err := inputTensor.Size() @@ -249,9 +249,9 @@ func (ts Tensor) indexer(indexSpec []TensorIndexer) (retVal Tensor, err error) { // Now, apply indexing from left to right. var ( - currTensor Tensor = ts.MustShallowClone() - currIdx int64 = 0 - nextTensor Tensor + currTensor *Tensor = ts.MustShallowClone() + currIdx int64 = 0 + nextTensor *Tensor nextIdx int64 ) @@ -282,8 +282,8 @@ func (ts Tensor) indexer(indexSpec []TensorIndexer) (retVal Tensor, err error) { return retVal, err } nextIdx = currIdx + 1 - case "IndexSelect": // 1 field `(Index Tensor)` - indexTensor := reflect.ValueOf(spec).FieldByName("Index").Interface().(Tensor) + case "IndexSelect": // 1 field `(Index *Tensor)` + indexTensor := reflect.ValueOf(spec).FieldByName("Index").Interface().(*Tensor) device, err := currTensor.Device() if err != nil { return retVal, err @@ -307,7 +307,7 @@ func (ts Tensor) indexer(indexSpec []TensorIndexer) (retVal Tensor, err error) { return retVal, nil } -func (ts Tensor) mustIndexer(indexSpec []TensorIndexer) (retVal Tensor) { +func (ts *Tensor) mustIndexer(indexSpec []TensorIndexer) (retVal *Tensor) { retVal, err := ts.indexer(indexSpec) if err != nil { panic(err) diff --git a/tensor/iter.go b/tensor/iter.go index ef7475e..af7465c 100644 --- a/tensor/iter.go +++ b/tensor/iter.go @@ -14,27 +14,27 @@ type Iterator interface { type Iterable struct { Index int64 Len int64 - Content Tensor + Content *Tensor ItemKind gotch.DType } // Next implements Iterator interface -func (it *Iterable) Next() (retVal interface{}, ok bool) { +func (it *Iterable) Next() (item interface{}, ok bool) { if it.Index == it.Len { - return retVal, false + return nil, false } var err error switch it.ItemKind.Kind().String() { case "int64": - retVal, err = it.Content.Int64Value([]int64{it.Index}) + item, err = it.Content.Int64Value([]int64{it.Index}) if err != nil { log.Fatal(err) } it.Index += 1 case "float64": - retVal, err = it.Content.Float64Value([]int64{it.Index}) + item, err = it.Content.Float64Value([]int64{it.Index}) if err != nil { log.Fatal(err) } @@ -44,22 +44,22 @@ func (it *Iterable) Next() (retVal interface{}, ok bool) { log.Fatal(err) } 
- return retVal, true + return item, true } // Iter creates an iterable object with specified item type. -func (ts Tensor) Iter(dtype gotch.DType) (retVal Iterable, err error) { +func (ts *Tensor) Iter(dtype gotch.DType) (*Iterable, error) { num, err := ts.Size1() // size for 1D tensor if err != nil { - return retVal, err + return nil, err } tmp, err := ts.ShallowClone() if err != nil { - return retVal, err + return nil, err } content := tmp.MustTotype(dtype, true) - return Iterable{ + return &Iterable{ Index: 0, Len: num, Content: content, diff --git a/tensor/jit.go b/tensor/jit.go index 0e5af91..3e13d86 100644 --- a/tensor/jit.go +++ b/tensor/jit.go @@ -950,7 +950,7 @@ func ModuleLoadDataOnDevice(stream io.Reader, device gotch.Device) (retVal CModu } // Performs the forward pass for a model on some specified tensor inputs. -func (cm CModule) ForwardTs(tensors []Tensor) (retVal Tensor, err error) { +func (cm CModule) ForwardTs(tensors []Tensor) (retVal *Tensor, err error) { var ctensors []lib.Ctensor for _, t := range tensors { ctensors = append(ctensors, t.ctensor) @@ -994,7 +994,7 @@ func (cm CModule) ForwardTs(tensors []Tensor) (retVal Tensor, err error) { return retVal, err } - return Tensor{ctensor}, nil + return &Tensor{ctensor}, nil } // Performs the forward pass for a model on some specified ivalue input. @@ -1066,9 +1066,9 @@ func (cm CModule) To(device gotch.Device, kind gotch.DType, nonBlocking bool) { // Implement Module for CModule: // ============================= -func (cm CModule) Forward(tensor Tensor) (retVal Tensor, err error) { +func (cm CModule) Forward(tensor *Tensor) (retVal *Tensor, err error) { - var tensors []Tensor = []Tensor{tensor} + var tensors []Tensor = []Tensor{*tensor} return cm.ForwardTs(tensors) } @@ -1076,7 +1076,7 @@ func (cm CModule) Forward(tensor Tensor) (retVal Tensor, err error) { // ====================================== // Apply forwards tensor itself through a module. -func (ts Tensor) ApplyCModule(m CModule) (retVal Tensor) { +func (ts *Tensor) ApplyCModule(m CModule) (retVal *Tensor) { retVal, err := m.Forward(ts) if err != nil { log.Fatal(err) diff --git a/tensor/jit_test.go b/tensor/jit_test.go index 38d72a2..ffd53bb 100644 --- a/tensor/jit_test.go +++ b/tensor/jit_test.go @@ -59,7 +59,7 @@ func TestModuleForwardTs(t *testing.T) { ts1 := ts.TensorFrom([]int64{42}) ts2 := ts.TensorFrom([]int64{1337}) - res, err := foo.ForwardTs([]ts.Tensor{ts1, ts2}) + res, err := foo.ForwardTs([]ts.Tensor{*ts1, *ts2}) if err != nil { t.Error(err) } @@ -83,8 +83,8 @@ func TestModuleForwardIValue(t *testing.T) { ts1 := ts.TensorFrom([]int64{42}) ts2 := ts.TensorFrom([]int64{1337}) - iv1 := ts.NewIValue(ts1) - iv2 := ts.NewIValue(ts2) + iv1 := ts.NewIValue(*ts1) + iv2 := ts.NewIValue(*ts2) got, err := foo.ForwardIs([]ts.IValue{iv1, iv2}) if err != nil { @@ -93,7 +93,7 @@ func TestModuleForwardIValue(t *testing.T) { expectedTs1 := ts.TensorFrom([]int64{1421}) expectedTs2 := ts.TensorFrom([]int64{-1295}) - want := ts.NewIValue([]ts.Tensor{expectedTs1, expectedTs2}) + want := ts.NewIValue([]ts.Tensor{*expectedTs1, *expectedTs2}) if !reflect.DeepEqual(want.Name(), got.Name()) { t.Errorf("Expected Ivalue Name: %v\n", want.Name()) diff --git a/tensor/module.go b/tensor/module.go index 0c8b857..61bc3b7 100644 --- a/tensor/module.go +++ b/tensor/module.go @@ -9,7 +9,7 @@ package tensor // be registered, and will have their parameters converted too when you call .cuda(), etc. 
type Module interface { // ModuleT - Forward(xs Tensor) Tensor + Forward(xs *Tensor) *Tensor } // ModuleT is a `Module` with an additional train parameter @@ -17,7 +17,7 @@ type Module interface { // between training and evaluation. E.g. When using dropout or batch-normalization. type ModuleT interface { // Forward(xs Tensor) Tensor - ForwardT(xs Tensor, train bool) Tensor + ForwardT(xs *Tensor, train bool) *Tensor } /* @@ -99,18 +99,18 @@ type ModuleT interface { // ====================================== // Apply forwards tensor itself through a module. -func (ts Tensor) Apply(m Module) (retVal Tensor) { +func (ts *Tensor) Apply(m Module) (retVal *Tensor) { return m.Forward(ts) } // Apply forwards tensor itself through a module T. -func (ts Tensor) ApplyT(m ModuleT, train bool) (retVal Tensor) { +func (ts *Tensor) ApplyT(m ModuleT, train bool) (retVal *Tensor) { return m.ForwardT(ts, train) } // ApplyOpt forwards a tensor itself through a module if given, shallow-copies // the tensor otherwise. -func (ts Tensor) ApplyOpt(opts ...ModuleOption) (retVal Tensor) { +func (ts *Tensor) ApplyOpt(opts ...ModuleOption) (retVal *Tensor) { switch { case len(opts) > 0: @@ -131,7 +131,7 @@ func WithModule(m Module) ModuleOption { // ApplyOptT forwards a tensor itself through a module T if given, shallow-copies // the tensor otherwise. -func (ts Tensor) ApplyOptT(train bool, opts ...ModuleTOption) (retVal Tensor) { +func (ts *Tensor) ApplyOptT(train bool, opts ...ModuleTOption) (retVal *Tensor) { switch { case len(opts) > 0: diff --git a/tensor/must-tensor-generated.go b/tensor/must-tensor-generated.go index dd93c89..1bfcefe 100644 --- a/tensor/must-tensor-generated.go +++ b/tensor/must-tensor-generated.go @@ -2,8042 +2,10050 @@ package tensor // NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! 
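With Module and ModuleT now defined over *Tensor, a custom layer only needs a pointer-receiver Forward, and Apply threads a tensor through it. The Cast type below is invented purely for illustration; MustTotype and MustRandn are wrappers seen elsewhere in this patch. The generated file that follows uses the same Must* convention throughout: call the fallible method and log.Fatal on error.

    package main

    import (
        "fmt"

        "github.com/sugarme/gotch"
        ts "github.com/sugarme/gotch/tensor"
    )

    // Cast is a made-up module that converts its input to another dtype.
    type Cast struct {
        dtype gotch.DType
    }

    // Forward satisfies ts.Module under the new pointer-based signature.
    func (m *Cast) Forward(xs *ts.Tensor) *ts.Tensor {
        // The trailing bool on MustTotype is the usual `del` flag.
        return xs.MustTotype(m.dtype, false)
    }

    func main() {
        x := ts.MustRandn([]int64{2, 3}, gotch.Float, gotch.CPU)
        y := x.Apply(&Cast{dtype: gotch.Double}) // Apply just calls m.Forward(x)
        fmt.Println(y.MustSize())
    }
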
-import( - "log" +import ( + "log" - "github.com/sugarme/gotch" + "github.com/sugarme/gotch" ) +func (ts *Tensor) Must__And_(other *Scalar) { -func(ts Tensor) Must__And_(other Scalar)() { - - err := ts.__And_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__And1(other Tensor)() { - - err := ts.__And1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Iand_(other Scalar)() { - - err := ts.__Iand_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Iand1(other Tensor)() { - - err := ts.__Iand1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Ilshift_(other Scalar)() { - - err := ts.__Ilshift_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Ilshift1(other Tensor)() { - - err := ts.__Ilshift1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Ior_(other Scalar)() { - - err := ts.__Ior_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Ior1(other Tensor)() { - - err := ts.__Ior1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Irshift_(other Scalar)() { - - err := ts.__Irshift_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Irshift1(other Tensor)() { - - err := ts.__Irshift1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Ixor_(other Scalar)() { - - err := ts.__Ixor_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Ixor1(other Tensor)() { - - err := ts.__Ixor1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Lshift_(other Scalar)() { - - err := ts.__Lshift_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Lshift1(other Tensor)() { - - err := ts.__Lshift1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Or_(other Scalar)() { - - err := ts.__Or_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Or1(other Tensor)() { - - err := ts.__Or1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Rshift_(other Scalar)() { - - err := ts.__Rshift_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Rshift1(other Tensor)() { - - err := ts.__Rshift1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Xor_(other Scalar)() { - - err := ts.__Xor_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must__Xor1(other Tensor)() { - - err := ts.__Xor1(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts._AdaptiveAvgPool2d(outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Addr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._Addr(vec1, vec2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Addr_(vec1 Tensor, vec2 Tensor)() { - - err := ts._Addr_(vec1, vec2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_AddrOut(out Tensor, vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { - - retVal, 
err := ts._AddrOut(out, vec1, vec2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_AmpUpdateScale(growthTracker Tensor, currentScale Tensor, foundInf Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)(retVal Tensor) { - - retVal, err := _AmpUpdateScale(growthTracker, currentScale, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_BaddbmmMkl_(batch1 Tensor, batch2 Tensor)() { - - err := ts._BaddbmmMkl_(batch1, batch2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_CastByte(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastByte(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CastChar(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastChar(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CastDouble(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastDouble(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CastFloat(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastFloat(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CastHalf(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastHalf(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CastInt(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastInt(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CastLong(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastLong(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CastShort(nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CastShort(nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_Cat(tensors []Tensor, dim int64)(retVal Tensor) { - - retVal, err := _Cat(tensors, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_CatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor) { - - retVal, err := _CatOut(out, tensors, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_CdistBackward(grad Tensor, x1 Tensor, x2 Tensor, p float64, cdist Tensor)(retVal Tensor) { - - retVal, err := _CdistBackward(grad, x1, x2, p, cdist) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CholeskyHelper(upper bool, del bool)(retVal Tensor) { - - retVal, err := ts._CholeskyHelper(upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CholeskySolveHelper(a Tensor, upper bool, del bool)(retVal Tensor) { - - retVal, err := ts._CholeskySolveHelper(a, upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Coalesced_(coalesced bool)() { - - err := ts._Coalesced_(coalesced) - if err != nil { log.Fatal(err) } - - return -} - -func Must_Convolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal Tensor) { - - retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, 
benchmark, deterministic, cudnnEnabled) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_ConvolutionNogroup(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64)(retVal Tensor) { - - retVal, err := _ConvolutionNogroup(input, weight, bias, stride, padding, dilation, transposed, outputPadding) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CopyFrom(dst Tensor, nonBlocking bool, del bool)(retVal Tensor) { - - retVal, err := ts._CopyFrom(dst, nonBlocking, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_CtcLossBackward(grad Tensor, logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood Tensor, logAlpha Tensor, blank int64, zeroInfinity bool)(retVal Tensor) { - - retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal Tensor) { - - retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, numLayers, batchFirst, bidirectional) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Cumprod(dim int64, del bool)(retVal Tensor) { - - retVal, err := ts._Cumprod(dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CumprodOut(out Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts._CumprodOut(out, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Cumsum(dim int64, del bool)(retVal Tensor) { - - retVal, err := ts._Cumsum(dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_CumsumOut(out Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts._CumsumOut(out, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_DimArange(like Tensor, dim int64)(retVal Tensor) { - - retVal, err := _DimArange(like, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_DirichletGrad(x Tensor, alpha Tensor, total Tensor)(retVal Tensor) { - - retVal, err := _DirichletGrad(x, alpha, total) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_EmbeddingBagBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights Tensor)(retVal Tensor) { - - retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_EmbeddingBagDenseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor) { - - retVal, err := 
_EmbeddingBagDenseBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_EmbeddingBagPerSampleWeightsBackward(grad Tensor, weight Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, mode int64)(retVal Tensor) { - - retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_EmbeddingBagSparseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor) { - - retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal Tensor) { - - retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_EmptyPerChannelAffineQuantized(size []int64, scales Tensor, zeroPoints Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool)(retVal Tensor) { - - retVal, err := ts._FftWithSize(signalNdim, complexInput, complexOutput, inverse, checkedSignalSizes, normalized, onesided, outputSizes, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_GatherSparseBackward(dim int64, index Tensor, grad Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._GatherSparseBackward(dim, index, grad, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_IndexCopy_(dim int64, index Tensor, source Tensor)() { - - err := ts._IndexCopy_(dim, index, source) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_IndexPutImpl_(indices []Tensor, values Tensor, accumulate bool, unsafety bool)() { - - err := ts._IndexPutImpl_(indices, values, accumulate, unsafety) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_Indices(del bool)(retVal Tensor) { - - retVal, err := ts._Indices(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_InverseHelper(del bool)(retVal Tensor) { - - retVal, err := ts._InverseHelper(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal Tensor) { - - retVal, err := ts._LogSoftmax(dim, halfToFloat, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_LogSoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts._LogSoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_LuSolveHelper(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._LuSolveHelper(lUData, lUPivots, del) - if err 
!= nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_MakePerChannelQuantizedTensor(scale Tensor, zeroPoint Tensor, axis int64, del bool)(retVal Tensor) { - - retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal Tensor) { - - retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_MaskedScale(mask Tensor, scale float64, del bool)(retVal Tensor) { - - retVal, err := ts._MaskedScale(mask, scale, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_MkldnnReshape(shape []int64, del bool)(retVal Tensor) { - - retVal, err := ts._MkldnnReshape(shape, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal Tensor) { - - retVal, err := ts._MkldnnTranspose(dim0, dim1, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64)() { - - err := ts._MkldnnTranspose_(dim0, dim1) - if err != nil { log.Fatal(err) } - - return -} - -func Must_MultinomialAliasDraw(j Tensor, q Tensor, numSamples int64)(retVal Tensor) { - - retVal, err := _MultinomialAliasDraw(j, q, numSamples) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_NnpackSpatialConvolution(input Tensor, weight Tensor, bias Tensor, padding []int64, stride []int64)(retVal Tensor) { - - retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_NnpackSpatialConvolutionBackwardInput(input Tensor, gradOutput Tensor, weight Tensor, padding []int64)(retVal Tensor) { - - retVal, err := _NnpackSpatialConvolutionBackwardInput(input, gradOutput, weight, padding) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_NnpackSpatialConvolutionBackwardWeight(input Tensor, weightsize []int64, gradOutput Tensor, padding []int64)(retVal Tensor) { - - retVal, err := _NnpackSpatialConvolutionBackwardWeight(input, weightsize, gradOutput, padding) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_PackPaddedSequenceBackward(grad Tensor, inputSize []int64, batchSizes Tensor, batchFirst bool)(retVal Tensor) { - - retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_PdistBackward(grad Tensor, p float64, pdist Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._PdistBackward(grad, p, pdist, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_ReshapeFromTensor(shape Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._ReshapeFromTensor(shape, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SWhere(condition Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._SWhere(condition, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SampleDirichlet(del bool)(retVal Tensor) { - - retVal, err := ts._SampleDirichlet(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_ShapeAsTensor(del bool)(retVal Tensor) { - - retVal, err := ts._ShapeAsTensor(del) - if err != nil { log.Fatal(err) } - - return 
retVal -} - -func(ts Tensor) Must_SobolEngineFf_(n int64, sobolstate Tensor, dimension int64, numGenerated int64)() { - - err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_SobolEngineInitializeState_(dimension int64)() { - - err := ts._SobolEngineInitializeState_(dimension) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_SobolEngineScramble_(ltm Tensor, dimension int64)() { - - err := ts._SobolEngineScramble_(ltm, dimension) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool)(retVal Tensor) { - - retVal, err := ts._Softmax(dim, halfToFloat, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts._SoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SparseAddmm(sparse Tensor, dense Tensor, del bool)(retVal Tensor) { - - retVal, err := ts._SparseAddmm(sparse, dense, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_SparseCooTensorUnsafe(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_SparseMm(sparse Tensor, dense Tensor)(retVal Tensor) { - - retVal, err := _SparseMm(sparse, dense) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SparseSum(del bool)(retVal Tensor) { - - retVal, err := ts._SparseSum(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SparseSum1(dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts._SparseSum1(dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SparseSum2(dim []int64, del bool)(retVal Tensor) { - - retVal, err := ts._SparseSum2(dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SparseSum3(dim []int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts._SparseSum3(dim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_SparseSumBackward(grad Tensor, dim []int64, del bool)(retVal Tensor) { - - retVal, err := ts._SparseSumBackward(grad, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_StandardGamma(del bool)(retVal Tensor) { - - retVal, err := ts._StandardGamma(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_StandardGammaGrad(output Tensor, del bool)(retVal 
Tensor) { - - retVal, err := ts._StandardGammaGrad(output, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Std(unbiased bool, del bool)(retVal Tensor) { - - retVal, err := ts._Std(unbiased, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_Trilinear(i1 Tensor, i2 Tensor, i3 Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal Tensor) { - - retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_UnsafeView(size []int64, del bool)(retVal Tensor) { - - retVal, err := ts._UnsafeView(size, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Values(del bool)(retVal Tensor) { - - retVal, err := ts._Values(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) Must_Var(unbiased bool, del bool)(retVal Tensor) { - - retVal, err := ts._Var(unbiased, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func Must_WeightNorm(v Tensor, g Tensor, dim int64)(retVal Tensor) { - - retVal, err := _WeightNorm(v, g, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAbs(del bool)(retVal Tensor) { - - retVal, err := ts.Abs(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAbs_()() { - - err := ts.Abs_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAbsOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AbsOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAcos(del bool)(retVal Tensor) { - - retVal, err := ts.Acos(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAcos_()() { - - err := ts.Acos_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAcosOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AcosOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveAvgPool1d(outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveAvgPool2d(outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveAvgPool2dOut(out Tensor, outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveAvgPool3d(outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveAvgPool3dBackward(gradOutput Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dBackward(gradOutput, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveAvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dBackwardOut(gradInput, gradOutput, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveAvgPool3dOut(out Tensor, outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del) - if err != nil 
{ log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveMaxPool2dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveMaxPool2dBackwardOut(gradInput, gradOutput, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveMaxPool3dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdaptiveMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AdaptiveMaxPool3dBackwardOut(gradInput, gradOutput, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdd(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Add(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdd1(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Add1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAdd_(other Tensor)() { - - err := ts.Add_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAdd1_(other Scalar)() { - - err := ts.Add1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAddOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AddOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Addbmm(batch1, batch2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddbmm_(batch1 Tensor, batch2 Tensor)() { - - err := ts.Addbmm_(batch1, batch2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AddbmmOut(out, batch1, batch2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddcdiv(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Addcdiv(tensor1, tensor2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddcdiv_(tensor1 Tensor, tensor2 Tensor)() { - - err := ts.Addcdiv_(tensor1, tensor2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAddcdivOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddcmul(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Addcmul(tensor1, tensor2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddcmul_(tensor1 Tensor, tensor2 Tensor)() { - - err := ts.Addcmul_(tensor1, tensor2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAddcmulOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - 
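// Editorial sketch -- not part of the generated patch. Every Must* wrapper
// removed in this hunk has the same shape: call the fallible variant, abort
// via log.Fatal on error, and return the result (in-place `..._` variants
// return nothing); this commit only switches the receivers and return values
// from value `Tensor` to `*Tensor`. The snippet below shows how the
// post-patch Must API reads in use. The import paths, gotch.Float, gotch.CPU,
// Print, MustDrop, and the reading of `del` as "drop the receiver after the
// call" are assumptions for illustration, not taken verbatim from this diff.
package main

import (
	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Factory wrappers (no receiver) take a dtype and a device, e.g. MustEye
	// further down in this hunk.
	a := ts.MustEye(3, gotch.Float, gotch.CPU)
	b := ts.MustEye(3, gotch.Float, gotch.CPU)

	// Method wrappers now hang off *Tensor. Passing del=true asks the wrapper
	// to free the receiver (`a`) once the result exists, which suits
	// throw-away intermediates in a chain; pass false to keep using it.
	c := a.MustAdd(b, true)
	c.Print() // assumed pretty-printer on Tensor

	// Explicitly free the remaining C-backed tensors (MustDrop assumed).
	b.MustDrop()
	c.MustDrop()
}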
-func(ts Tensor) MustAddmm(mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Addmm(mat1, mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddmm_(mat1 Tensor, mat2 Tensor)() { - - err := ts.Addmm_(mat1, mat2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AddmmOut(out, mat1, mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddmv(mat Tensor, vec Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Addmv(mat, vec, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddmv_(mat Tensor, vec Tensor)() { - - err := ts.Addmv_(mat, vec) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAddmvOut(out Tensor, mat Tensor, vec Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AddmvOut(out, mat, vec, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Addr(vec1, vec2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAddr_(vec1 Tensor, vec2 Tensor)() { - - err := ts.Addr_(vec1, vec2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAddrOut(out Tensor, vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AddrOut(out, vec1, vec2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustAffineGridGenerator(theta Tensor, size []int64, alignCorners bool)(retVal Tensor) { - - retVal, err := AffineGridGenerator(theta, size, alignCorners) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustAffineGridGeneratorBackward(grad Tensor, size []int64, alignCorners bool)(retVal Tensor) { - - retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAlias(del bool)(retVal Tensor) { - - retVal, err := ts.Alias(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAlignAs(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AlignAs(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAll(del bool)(retVal Tensor) { - - retVal, err := ts.All(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAll1(dim int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.All1(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAllOut(out Tensor, dim int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.AllOut(out, dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustAlphaDropout(input Tensor, p float64, train bool)(retVal Tensor) { - - retVal, err := AlphaDropout(input, p, train) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAlphaDropout_(p float64, train bool)() { - - err := ts.AlphaDropout_(p, train) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAngle(del bool)(retVal Tensor) { - - retVal, err := ts.Angle(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAngleOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AngleOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAny(del 
bool)(retVal Tensor) { - - retVal, err := ts.Any(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAny1(dim int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.Any1(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAnyOut(out Tensor, dim int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.AnyOut(out, dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustArange(end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Arange(end, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustArange1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Arange1(start, end, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustArange2(start Scalar, end Scalar, step Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Arange2(start, end, step, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustArangeOut(out Tensor, end Scalar)(retVal Tensor) { - - retVal, err := ArangeOut(out, end) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustArangeOut1(out Tensor, start Scalar, end Scalar)(retVal Tensor) { - - retVal, err := ArangeOut1(out, start, end) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustArgmax(dim int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.Argmax(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustArgmin(dim int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.Argmin(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustArgsort(dim int64, descending bool, del bool)(retVal Tensor) { - - retVal, err := ts.Argsort(dim, descending, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAsStrided(size []int64, stride []int64, storageOffset int64, del bool)(retVal Tensor) { - - retVal, err := ts.AsStrided(size, stride, storageOffset, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset int64)() { - - err := ts.AsStrided_(size, stride, storageOffset) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAsin(del bool)(retVal Tensor) { - - retVal, err := ts.Asin(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAsin_()() { - - err := ts.Asin_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAsinOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AsinOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAtan(del bool)(retVal Tensor) { - - retVal, err := ts.Atan(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAtan2(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Atan2(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAtan2_(other Tensor)() { - - err := ts.Atan2_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAtan2Out(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Atan2Out(out, other, del) - if err != nil { log.Fatal(err) } 
- - return retVal -} - -func(ts Tensor) MustAtan_()() { - - err := ts.Atan_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustAtanOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.AtanOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool2dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool2dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool2dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool3dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool3dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustAvgPool3dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { - - retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - 
if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBaddbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Baddbmm(batch1, batch2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBaddbmm_(batch1 Tensor, batch2 Tensor)() { - - err := ts.Baddbmm_(batch1, batch2) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBaddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BaddbmmOut(out, batch1, batch2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := BartlettWindow1(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBatchNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor) { - - retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBatchNormBackwardElemt(gradOut Tensor, input Tensor, mean Tensor, invstd Tensor, weight Tensor, meanDy Tensor, meanDyXmu Tensor)(retVal Tensor) { - - retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBatchNormElemt(input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor) { - - retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBatchNormElemtOut(out Tensor, input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor) { - - retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBernoulli(del bool)(retVal Tensor) { - - retVal, err := ts.Bernoulli(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBernoulli1(p float64, del bool)(retVal Tensor) { - - retVal, err := ts.Bernoulli1(p, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBernoulli_(p Tensor)() { - - err := ts.Bernoulli_(p) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBernoulli1_(p float64)() { - - err := ts.Bernoulli1_(p) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBernoulliOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BernoulliOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBilinear(input1 Tensor, input2 Tensor, weight Tensor, bias Tensor)(retVal Tensor) { - - retVal, err := Bilinear(input1, input2, weight, bias) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBinaryCrossEntropy(target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal 
-} - -func(ts Tensor) MustBinaryCrossEntropyBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBinaryCrossEntropyBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.BinaryCrossEntropyBackwardOut(gradInput, gradOutput, target, weight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBinaryCrossEntropyOut(out Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBinaryCrossEntropyWithLogits(target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput Tensor, target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBincount(weights Tensor, minlength int64, del bool)(retVal Tensor) { - - retVal, err := ts.Bincount(weights, minlength, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseAnd(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseAnd(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseAnd1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseAnd1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseAnd_(other Scalar)() { - - err := ts.BitwiseAnd_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBitwiseAnd1_(other Tensor)() { - - err := ts.BitwiseAnd1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBitwiseAndOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseAndOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseAndOut1(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseAndOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseNot(del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseNot(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseNot_()() { - - err := ts.BitwiseNot_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBitwiseNotOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseNotOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseOr(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseOr(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseOr1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseOr1(other, del) - if err != nil { log.Fatal(err) } - - return retVal 
-} - -func(ts Tensor) MustBitwiseOr_(other Scalar)() { - - err := ts.BitwiseOr_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBitwiseOr1_(other Tensor)() { - - err := ts.BitwiseOr1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBitwiseOrOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseOrOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseOrOut1(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseOrOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseXor(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseXor(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseXor1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseXor1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseXor_(other Scalar)() { - - err := ts.BitwiseXor_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBitwiseXor1_(other Tensor)() { - - err := ts.BitwiseXor1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustBitwiseXorOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseXorOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBitwiseXorOut1(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.BitwiseXorOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustBlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := BlackmanWindow1(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBmm(mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Bmm(mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustBmmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.BmmOut(out, mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCartesianProd(tensors []Tensor)(retVal Tensor) { - - retVal, err := CartesianProd(tensors) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCat(tensors []Tensor, dim int64)(retVal Tensor) { - - retVal, err := Cat(tensors, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor) { - - retVal, err := CatOut(out, tensors, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCauchy_(median float64, sigma float64)() { - - err := ts.Cauchy_(median, sigma) - if err != nil { log.Fatal(err) } - - return -} - -func MustCdist(x1 Tensor, x2 Tensor, p float64, computeMode int64)(retVal Tensor) { - - retVal, err := Cdist(x1, x2, p, computeMode) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCeil(del bool)(retVal Tensor) { - - retVal, err := ts.Ceil(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts 
Tensor) MustCeil_()() { - - err := ts.Ceil_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustCeilOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.CeilOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCelu(del bool)(retVal Tensor) { - - retVal, err := ts.Celu(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCelu_()() { - - err := ts.Celu_() - if err != nil { log.Fatal(err) } - - return -} - -func MustChainMatmul(matrices []Tensor)(retVal Tensor) { - - retVal, err := ChainMatmul(matrices) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCholesky(upper bool, del bool)(retVal Tensor) { - - retVal, err := ts.Cholesky(upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCholeskyInverse(upper bool, del bool)(retVal Tensor) { - - retVal, err := ts.CholeskyInverse(upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCholeskyInverseOut(out Tensor, upper bool, del bool)(retVal Tensor) { - - retVal, err := ts.CholeskyInverseOut(out, upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCholeskyOut(out Tensor, upper bool, del bool)(retVal Tensor) { - - retVal, err := ts.CholeskyOut(out, upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCholeskySolve(input2 Tensor, upper bool, del bool)(retVal Tensor) { - - retVal, err := ts.CholeskySolve(input2, upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCholeskySolveOut(out Tensor, input2 Tensor, upper bool, del bool)(retVal Tensor) { - - retVal, err := ts.CholeskySolveOut(out, input2, upper, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustClamp(min Scalar, max Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Clamp(min, max, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustClamp_(min Scalar, max Scalar)() { - - err := ts.Clamp_(min, max) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustClampMax(max Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.ClampMax(max, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustClampMax_(max Scalar)() { - - err := ts.ClampMax_(max) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustClampMaxOut(out Tensor, max Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.ClampMaxOut(out, max, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustClampMin(min Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.ClampMin(min, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustClampMin_(min Scalar)() { - - err := ts.ClampMin_(min) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustClampMinOut(out Tensor, min Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.ClampMinOut(out, min, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustClampOut(out Tensor, min Scalar, max Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.ClampOut(out, min, max, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCoalesce(del bool)(retVal Tensor) { - - retVal, err := ts.Coalesce(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCol2im(outputSize []int64, kernelSize 
[]int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCol2imBackward(gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { - - retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCol2imBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { - - retVal, err := Col2imBackwardOut(gradInput, gradOutput, kernelSize, dilation, padding, stride) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCol2imOut(out Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCombinations(r int64, withReplacement bool, del bool)(retVal Tensor) { - - retVal, err := ts.Combinations(r, withReplacement, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustConj(del bool)(retVal Tensor) { - - retVal, err := ts.Conj(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustConjOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ConjOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustConstantPadNd(pad []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ConstantPadNd(pad, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustContiguous(del bool)(retVal Tensor) { - - retVal, err := ts.Contiguous(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConv1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor) { - - retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConv2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor) { - - retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConv3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor) { - - retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustConvTbc(weight Tensor, bias Tensor, pad int64, del bool)(retVal Tensor) { - - retVal, err := ts.ConvTbc(weight, bias, pad, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConvTranspose1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor) { - - retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConvTranspose2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor) { - - retVal, 
err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConvTranspose3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor) { - - retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConvolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor) { - - retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustConvolutionOverrideable(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor) { - - retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCopySparseToSparse_(src Tensor, nonBlocking bool)() { - - err := ts.CopySparseToSparse_(src, nonBlocking) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustCos(del bool)(retVal Tensor) { - - retVal, err := ts.Cos(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCos_()() { - - err := ts.Cos_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustCosOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.CosOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCosh(del bool)(retVal Tensor) { - - retVal, err := ts.Cosh(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCosh_()() { - - err := ts.Cosh_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustCoshOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.CoshOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCosineEmbeddingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor) { - - retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCosineSimilarity(x1 Tensor, x2 Tensor, dim int64, eps float64)(retVal Tensor) { - - retVal, err := CosineSimilarity(x1, x2, dim, eps) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCross(other Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.Cross(other, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCrossOut(out Tensor, other Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.CrossOut(out, other, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCtcLoss(logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal Tensor) { - - retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCtcLoss1(logProbs Tensor, targets Tensor, inputLengths Tensor, targetLengths Tensor, blank int64, reduction int64, zeroInfinity 
bool)(retVal Tensor) { - - retVal, err := CtcLoss1(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCudnnAffineGridGenerator(theta Tensor, n int64, c int64, h int64, w int64)(retVal Tensor) { - - retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCudnnAffineGridGeneratorBackward(grad Tensor, n int64, c int64, h int64, w int64)(retVal Tensor) { - - retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCudnnConvolution(weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCudnnConvolution1(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.CudnnConvolution1(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCudnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { - - retVal, err := CudnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCudnnConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.CudnnConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCudnnConvolutionTranspose(weight Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCudnnConvolutionTranspose1(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.CudnnConvolutionTranspose1(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustCudnnConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { - - retVal, err := CudnnConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, 
stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.CudnnConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCudnnGridSampler(grid Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.CudnnGridSampler(grid, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Cumprod(dim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCumprodOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.CumprodOut(out, dim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Cumsum(dim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustCumsumOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.CumsumOut(out, dim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustData(del bool)(retVal Tensor) { - - retVal, err := ts.Data(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDequantize(del bool)(retVal Tensor) { - - retVal, err := ts.Dequantize(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDet(del bool)(retVal Tensor) { - - retVal, err := ts.Det(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDetach(del bool)(retVal Tensor) { - - retVal, err := ts.Detach(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDetach_()() { - - err := ts.Detach_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustDiag(diagonal int64, del bool)(retVal Tensor) { - - retVal, err := ts.Diag(diagonal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor) { - - retVal, err := ts.DiagEmbed(offset, dim1, dim2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDiagOut(out Tensor, diagonal int64, del bool)(retVal Tensor) { - - retVal, err := ts.DiagOut(out, diagonal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDiagflat(offset int64, del bool)(retVal Tensor) { - - retVal, err := ts.Diagflat(offset, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor) { - - retVal, err := ts.Diagonal(offset, dim1, dim2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDigamma(del bool)(retVal Tensor) { - - retVal, err := ts.Digamma(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDigamma_()() { - - err := ts.Digamma_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustDigammaOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.DigammaOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDist(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Dist(other, del) - if err != nil { log.Fatal(err) } - - return retVal 
-} - -func(ts Tensor) MustDiv(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Div(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDiv1(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Div1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDiv_(other Tensor)() { - - err := ts.Div_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustDiv1_(other Scalar)() { - - err := ts.Div1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustDivOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.DivOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDot(tensor Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Dot(tensor, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDotOut(out Tensor, tensor Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.DotOut(out, tensor, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustDropout(input Tensor, p float64, train bool)(retVal Tensor) { - - retVal, err := Dropout(input, p, train) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustDropout_(p float64, train bool)() { - - err := ts.Dropout_(p, train) - if err != nil { log.Fatal(err) } - - return -} - -func MustEinsum(equation string, tensors []Tensor)(retVal Tensor) { - - retVal, err := Einsum(equation, tensors) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustElu(del bool)(retVal Tensor) { - - retVal, err := ts.Elu(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustElu_()() { - - err := ts.Elu_() - if err != nil { log.Fatal(err) } - - return -} - -func MustEluBackward(gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor) { - - retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, output) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEluBackwardOut(gradInput Tensor, gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor) { - - retVal, err := EluBackwardOut(gradInput, gradOutput, alpha, scale, inputScale, output) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustEluOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.EluOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEmbedding(weight Tensor, indices Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor) { - - retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEmbeddingBackward(grad Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor) { - - retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEmbeddingDenseBackward(gradOutput Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor) { - - retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustEmbeddingRenorm_(indices Tensor, maxNorm float64, normType float64)() { - - err := 
ts.EmbeddingRenorm_(indices, maxNorm, normType) - if err != nil { log.Fatal(err) } - - return -} - -func MustEmbeddingSparseBackward(grad Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor) { - - retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Empty(size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustEmptyLike(del bool)(retVal Tensor) { - - retVal, err := ts.EmptyLike(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEmptyOut(out Tensor, size []int64)(retVal Tensor) { - - retVal, err := EmptyOut(out, size) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustEq(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Eq(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustEq1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Eq1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustEq_(other Scalar)() { - - err := ts.Eq_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustEq1_(other Tensor)() { - - err := ts.Eq1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustEqOut(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.EqOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustEqOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.EqOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustErf(del bool)(retVal Tensor) { - - retVal, err := ts.Erf(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustErf_()() { - - err := ts.Erf_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustErfOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ErfOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustErfc(del bool)(retVal Tensor) { - - retVal, err := ts.Erfc(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustErfc_()() { - - err := ts.Erfc_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustErfcOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ErfcOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustErfinv(del bool)(retVal Tensor) { - - retVal, err := ts.Erfinv(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustErfinv_()() { - - err := ts.Erfinv_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustErfinvOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ErfinvOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustExp(del bool)(retVal Tensor) { - - retVal, err := ts.Exp(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustExp_()() { - - err := ts.Exp_() - 
if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustExpOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ExpOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustExpand(size []int64, implicit bool, del bool)(retVal Tensor) { - - retVal, err := ts.Expand(size, implicit, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustExpandAs(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ExpandAs(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustExpm1(del bool)(retVal Tensor) { - - retVal, err := ts.Expm1(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustExpm1_()() { - - err := ts.Expm1_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustExpm1Out(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Expm1Out(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustExponential_(lambd float64)() { - - err := ts.Exponential_(lambd) - if err != nil { log.Fatal(err) } - - return -} - -func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Eye(n, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEye1(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Eye1(n, m, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEyeOut(out Tensor, n int64)(retVal Tensor) { - - retVal, err := EyeOut(out, n) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustEyeOut1(out Tensor, n int64, m int64)(retVal Tensor) { - - retVal, err := EyeOut1(out, n, m) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFakeQuantizePerChannelAffine(scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { - - retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFakeQuantizePerChannelAffineBackward(grad Tensor, scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { - - retVal, err := ts.FakeQuantizePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { - - retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFakeQuantizePerTensorAffineBackward(grad Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { - - retVal, err := ts.FakeQuantizePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFbgemmLinearFp16Weight(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor) { - - retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFbgemmLinearFp16WeightFp32Activation(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor) { - - retVal, err := FbgemmLinearFp16WeightFp32Activation(input, 
packedWeight, bias) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFbgemmLinearInt8Weight(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor) { - - retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFbgemmLinearInt8WeightFp32Activation(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor) { - - retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFbgemmPackGemmMatrixFp16(input Tensor)(retVal Tensor) { - - retVal, err := FbgemmPackGemmMatrixFp16(input) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFbgemmPackQuantizedMatrix(input Tensor)(retVal Tensor) { - - retVal, err := FbgemmPackQuantizedMatrix(input) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFbgemmPackQuantizedMatrix1(input Tensor, k int64, n int64)(retVal Tensor) { - - retVal, err := FbgemmPackQuantizedMatrix1(input, k, n) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFeatureAlphaDropout(input Tensor, p float64, train bool)(retVal Tensor) { - - retVal, err := FeatureAlphaDropout(input, p, train) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFeatureAlphaDropout_(p float64, train bool)() { - - err := ts.FeatureAlphaDropout_(p, train) - if err != nil { log.Fatal(err) } - - return -} - -func MustFeatureDropout(input Tensor, p float64, train bool)(retVal Tensor) { - - retVal, err := FeatureDropout(input, p, train) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFeatureDropout_(p float64, train bool)() { - - err := ts.FeatureDropout_(p, train) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFft(signalNdim int64, normalized bool, del bool)(retVal Tensor) { - - retVal, err := ts.Fft(signalNdim, normalized, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFill_(value Scalar)() { - - err := ts.Fill_(value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFill1_(value Tensor)() { - - err := ts.Fill1_(value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFillDiagonal_(fillValue Scalar, wrap bool)() { - - err := ts.FillDiagonal_(fillValue, wrap) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFlatten(startDim int64, endDim int64, del bool)(retVal Tensor) { - - retVal, err := ts.Flatten(startDim, endDim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFlip(dims []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Flip(dims, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFloor(del bool)(retVal Tensor) { - - retVal, err := ts.Floor(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFloor_()() { - - err := ts.Floor_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFloorDivide(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FloorDivide(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFloorDivide1(other Scalar, del bool)(retVal Tensor) { - - retVal, err := 
ts.FloorDivide1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFloorDivide_(other Tensor)() { - - err := ts.FloorDivide_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFloorDivide1_(other Scalar)() { - - err := ts.FloorDivide1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFloorDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FloorDivideOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFloorOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FloorOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFmod(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Fmod(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFmod1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Fmod1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFmod_(other Scalar)() { - - err := ts.Fmod_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFmod1_(other Tensor)() { - - err := ts.Fmod1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFmodOut(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.FmodOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFmodOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FmodOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFrac(del bool)(retVal Tensor) { - - retVal, err := ts.Frac(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFrac_()() { - - err := ts.Frac_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustFracOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FracOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFractionalMaxPool2dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFractionalMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FractionalMaxPool2dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFractionalMaxPool3dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFractionalMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.FractionalMaxPool3dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFrobeniusNorm(del bool)(retVal Tensor) { - - retVal, err := ts.FrobeniusNorm(del) - if err != nil { log.Fatal(err) } - - return 
retVal -} - -func(ts Tensor) MustFrobeniusNorm1(dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.FrobeniusNorm1(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFrobeniusNormOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFull(size []int64, fillValue Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Full(size, fillValue, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustFullLike(fillValue Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.FullLike(fillValue, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustFullOut(out Tensor, size []int64, fillValue Scalar)(retVal Tensor) { - - retVal, err := FullOut(out, size, fillValue) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGather(dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor) { - - retVal, err := ts.Gather(dim, index, sparseGrad, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGatherOut(out Tensor, dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor) { - - retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGe(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Ge(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGe1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Ge1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGe_(other Scalar)() { - - err := ts.Ge_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustGe1_(other Tensor)() { - - err := ts.Ge1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustGeOut(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.GeOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGeOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.GeOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGelu(del bool)(retVal Tensor) { - - retVal, err := ts.Gelu(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGeluBackward(grad Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.GeluBackward(grad, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGeometric_(p float64)() { - - err := ts.Geometric_(p) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustGer(vec2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Ger(vec2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGerOut(out Tensor, vec2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.GerOut(out, vec2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGlu(dim int64, del bool)(retVal Tensor) { - 
- retVal, err := ts.Glu(dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGluBackward(gradOutput Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.GluBackward(gradOutput, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGluBackwardOut(gradInput Tensor, gradOutput Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.GluBackwardOut(gradInput, gradOutput, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGluOut(out Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.GluOut(out, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGrad(del bool)(retVal Tensor) { - - retVal, err := ts.Grad(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustGridSampler(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor) { - - retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustGridSampler2d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor) { - - retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustGridSampler3d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor) { - - retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustGroupNorm(input Tensor, numGroups int64, weight Tensor, bias Tensor, eps float64, cudnnEnabled bool)(retVal Tensor) { - - retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustGruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor) { - - retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGt(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Gt(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGt1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Gt1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGt_(other Scalar)() { - - err := ts.Gt_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustGt1_(other Tensor)() { - - err := ts.Gt1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustGtOut(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.GtOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustGtOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.GtOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := HammingWindow1(windowLength, periodic, 
optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := HammingWindow2(windowLength, periodic, alpha, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := HammingWindow3(windowLength, periodic, alpha, beta, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := HannWindow(windowLength, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := HannWindow1(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardshrink(del bool)(retVal Tensor) { - - retVal, err := ts.Hardshrink(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardshrinkBackward(gradOut Tensor, lambd Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.HardshrinkBackward(gradOut, lambd, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardsigmoid(del bool)(retVal Tensor) { - - retVal, err := ts.Hardsigmoid(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardsigmoid_()() { - - err := ts.Hardsigmoid_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustHardsigmoidBackward(gradOutput Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.HardsigmoidBackward(gradOutput, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardsigmoidOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.HardsigmoidOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardtanh(del bool)(retVal Tensor) { - - retVal, err := ts.Hardtanh(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardtanh_()() { - - err := ts.Hardtanh_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustHardtanhBackward(gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardtanhBackwardOut(gradInput Tensor, gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.HardtanhBackwardOut(gradInput, gradOutput, minVal, maxVal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHardtanhOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.HardtanhOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHingeEmbeddingLoss(target Tensor, margin float64, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHistc(bins int64, del bool)(retVal Tensor) { - - retVal, err := ts.Histc(bins, del) - 
if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustHistcOut(out Tensor, bins int64, del bool)(retVal Tensor) { - - retVal, err := ts.HistcOut(out, bins, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHspmm(mat1 Tensor, mat2 Tensor)(retVal Tensor) { - - retVal, err := Hspmm(mat1, mat2) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustHspmmOut(out Tensor, mat1 Tensor, mat2 Tensor)(retVal Tensor) { - - retVal, err := HspmmOut(out, mat1, mat2) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIfft(signalNdim int64, normalized bool, del bool)(retVal Tensor) { - - retVal, err := ts.Ifft(signalNdim, normalized, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustIm2colBackward(gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { - - retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustIm2colBackwardOut(gradInput Tensor, gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { - - retVal, err := Im2colBackwardOut(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIm2colOut(out Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustImag(del bool)(retVal Tensor) { - - retVal, err := ts.Imag(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndex(indices []Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Index(indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndexAdd(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.IndexAdd(dim, index, source, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndexAdd_(dim int64, index Tensor, source Tensor)() { - - err := ts.IndexAdd_(dim, index, source) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustIndexCopy(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.IndexCopy(dim, index, source, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndexCopy_(dim int64, index Tensor, source Tensor)() { - - err := ts.IndexCopy_(dim, index, source) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustIndexFill(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.IndexFill(dim, index, value, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndexFill1(dim int64, index Tensor, value Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.IndexFill1(dim, index, value, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndexFill_(dim int64, index Tensor, value Scalar)() { - - err := 
ts.IndexFill_(dim, index, value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustIndexFill1_(dim int64, index Tensor, value Tensor)() { - - err := ts.IndexFill1_(dim, index, value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustIndexPut(indices []Tensor, values Tensor, accumulate bool, del bool)(retVal Tensor) { - - retVal, err := ts.IndexPut(indices, values, accumulate, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndexPut_(indices []Tensor, values Tensor, accumulate bool)() { - - err := ts.IndexPut_(indices, values, accumulate) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustIndexSelect(dim int64, index Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.IndexSelect(dim, index, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndexSelectOut(out Tensor, dim int64, index Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.IndexSelectOut(out, dim, index, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIndices(del bool)(retVal Tensor) { - - retVal, err := ts.Indices(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustInstanceNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor) { - - retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIntRepr(del bool)(retVal Tensor) { - - retVal, err := ts.IntRepr(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustInverse(del bool)(retVal Tensor) { - - retVal, err := ts.Inverse(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustInverseOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.InverseOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIrfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Irfft(signalNdim, normalized, onesided, signalSizes, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIsclose(other Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal Tensor) { - - retVal, err := ts.Isclose(other, rtol, atol, equalNan, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIsfinite(del bool)(retVal Tensor) { - - retVal, err := ts.Isfinite(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIsinf(del bool)(retVal Tensor) { - - retVal, err := ts.Isinf(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustIsnan(del bool)(retVal Tensor) { - - retVal, err := ts.Isnan(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustKlDiv(target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.KlDiv(target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustKlDivBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.KlDivBackward(gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustL1Loss(target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, 
err := ts.L1Loss(target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustL1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustL1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.L1LossBackwardOut(gradInput, gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustL1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.L1LossOut(out, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustLayerNorm(input Tensor, normalizedShape []int64, weight Tensor, bias Tensor, eps float64, cudnnEnable bool)(retVal Tensor) { - - retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLe(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Le(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLe1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Le1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLe_(other Scalar)() { - - err := ts.Le_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLe1_(other Tensor)() { - - err := ts.Le1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLeOut(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.LeOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLeOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LeOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLeakyRelu(del bool)(retVal Tensor) { - - retVal, err := ts.LeakyRelu(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLeakyRelu_()() { - - err := ts.LeakyRelu_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLeakyReluBackward(gradOutput Tensor, negativeSlope Scalar, selfIsResult bool, del bool)(retVal Tensor) { - - retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLeakyReluOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LeakyReluOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLerp(end Tensor, weight Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Lerp(end, weight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLerp1(end Tensor, weight Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Lerp1(end, weight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLerp_(end Tensor, weight Scalar)() { - - err := ts.Lerp_(end, weight) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLerp1_(end Tensor, weight Tensor)() { - - err := ts.Lerp1_(end, weight) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLerpOut(out Tensor, end Tensor, weight Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.LerpOut(out, 
end, weight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLerpOut1(out Tensor, end Tensor, weight Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LerpOut1(out, end, weight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLgamma(del bool)(retVal Tensor) { - - retVal, err := ts.Lgamma(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLgamma_()() { - - err := ts.Lgamma_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLgammaOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LgammaOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustLinear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor) { - - retVal, err := Linear(input, weight, bias) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustLinspace(start Scalar, end Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustLinspaceOut(out Tensor, start Scalar, end Scalar, steps int64)(retVal Tensor) { - - retVal, err := LinspaceOut(out, start, end, steps) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog(del bool)(retVal Tensor) { - - retVal, err := ts.Log(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog10(del bool)(retVal Tensor) { - - retVal, err := ts.Log10(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog10_()() { - - err := ts.Log10_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLog10Out(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Log10Out(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog1p(del bool)(retVal Tensor) { - - retVal, err := ts.Log1p(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog1p_()() { - - err := ts.Log1p_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLog1pOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Log1pOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog2(del bool)(retVal Tensor) { - - retVal, err := ts.Log2(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog2_()() { - - err := ts.Log2_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLog2Out(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Log2Out(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLog_()() { - - err := ts.Log_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLogNormal_(mean float64, std float64)() { - - err := ts.LogNormal_(mean, std) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLogOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogSigmoid(del bool)(retVal Tensor) { - - retVal, err := ts.LogSigmoid(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogSigmoidBackward(gradOutput Tensor, buffer Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del) - if err != nil { log.Fatal(err) } - - return retVal -} - 
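// NOTE (editorial sketch, not part of the generated patch): every Must* wrapper
// removed in this hunk follows one template -- call the fallible method, abort with
// log.Fatal on any error, and hand back the result. After this change the generator
// (gen/gen.ml) emits the same template with pointer receivers and *Tensor values.
// Below is a minimal hand-written example of the converted form, assuming the
// surrounding tensor package with "log" imported; MustLogicalAnd is used only as a
// representative wrapper and is not copied from the generated output:
func (ts *Tensor) MustLogicalAnd(other *Tensor, del bool) (retVal *Tensor) {
	// Delegate to the fallible API, which returns (*Tensor, error).
	retVal, err := ts.LogicalAnd(other, del)
	if err != nil {
		log.Fatal(err)
	}

	return retVal
}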
-func(ts Tensor) MustLogSigmoidBackwardOut(gradInput Tensor, gradOutput Tensor, buffer Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogSigmoidBackwardOut(gradInput, gradOutput, buffer, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogSigmoidOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogSigmoidOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.LogSoftmax(dim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogdet(del bool)(retVal Tensor) { - - retVal, err := ts.Logdet(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalAnd(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogicalAnd(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalAnd_(other Tensor)() { - - err := ts.LogicalAnd_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLogicalAndOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogicalAndOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalNot(del bool)(retVal Tensor) { - - retVal, err := ts.LogicalNot(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalNot_()() { - - err := ts.LogicalNot_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLogicalNotOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogicalNotOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalOr(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogicalOr(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalOr_(other Tensor)() { - - err := ts.LogicalOr_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLogicalOrOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogicalOrOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalXor(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogicalXor(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogicalXor_(other Tensor)() { - - err := ts.LogicalXor_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLogicalXorOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LogicalXorOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustLogspace(start Scalar, end Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustLogspaceOut(out Tensor, start Scalar, end Scalar, steps int64, base float64)(retVal Tensor) { - - retVal, err := LogspaceOut(out, start, end, steps, base) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.Logsumexp(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLogsumexpOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor) { 
- - retVal, err := ts.LogsumexpOut(out, dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLt(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Lt(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLt1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Lt1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLt_(other Scalar)() { - - err := ts.Lt_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLt1_(other Tensor)() { - - err := ts.Lt1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustLtOut(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.LtOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLtOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LtOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLuSolve(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LuSolve(lUData, lUPivots, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustLuSolveOut(out Tensor, lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustMarginRankingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor) { - - retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaskedFill(mask Tensor, value Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.MaskedFill(mask, value, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaskedFill1(mask Tensor, value Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaskedFill1(mask, value, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaskedFill_(mask Tensor, value Scalar)() { - - err := ts.MaskedFill_(mask, value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustMaskedFill1_(mask Tensor, value Tensor)() { - - err := ts.MaskedFill1_(mask, value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustMaskedScatter(mask Tensor, source Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaskedScatter(mask, source, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaskedScatter_(mask Tensor, source Tensor)() { - - err := ts.MaskedScatter_(mask, source) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustMaskedSelect(mask Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaskedSelect(mask, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaskedSelectOut(out Tensor, mask Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaskedSelectOut(out, mask, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMatmul(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Matmul(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMatmulOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MatmulOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) 
MustMatrixPower(n int64, del bool)(retVal Tensor) { - - retVal, err := ts.MatrixPower(n, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMatrixRank(symmetric bool, del bool)(retVal Tensor) { - - retVal, err := ts.MatrixRank(symmetric, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMatrixRank1(tol float64, symmetric bool, del bool)(retVal Tensor) { - - retVal, err := ts.MatrixRank1(tol, symmetric, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMax(del bool)(retVal Tensor) { - - retVal, err := ts.Max(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMax1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Max1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaxOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { - - retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { - - retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxPool2dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxPool2dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaxPool2dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { - - retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxPool3dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxPool3dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MaxPool3dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool2d(indices Tensor, outputSize 
[]int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool2d(indices, outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool2dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool2dBackward(gradOutput, indices, outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool2dBackwardOut(gradInput, gradOutput, indices, outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool2dOut(out Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool3d(indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool3d(indices, outputSize, stride, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool3dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool3dBackward(gradOutput, indices, outputSize, stride, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool3dBackwardOut(gradInput, gradOutput, indices, outputSize, stride, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxUnpool3dOut(out Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMaxValues(dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.MaxValues(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMean(dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Mean(dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMean1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Mean1(dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMeanOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMedian(del bool)(retVal Tensor) { - - retVal, err := ts.Median(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMin(del bool)(retVal Tensor) { - - retVal, err := ts.Min(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMin1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Min1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMinOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MinOut(out, other, del) - if err != 
nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMinValues(dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.MinValues(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMiopenConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustMiopenConvolutionBackwardBias(gradOutput Tensor)(retVal Tensor) { - - retVal, err := MiopenConvolutionBackwardBias(gradOutput) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustMiopenConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { - - retVal, err := MiopenConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMiopenConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.MiopenConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMiopenConvolutionTranspose(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustMiopenConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { - - retVal, err := MiopenConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.MiopenConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMiopenDepthwiseConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustMiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { - - retVal, err := 
MiopenDepthwiseConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { - - retVal, err := ts.MiopenDepthwiseConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor) { - - retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMkldnnConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor) { - - retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustMkldnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool)(retVal Tensor) { - - retVal, err := MkldnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, biasDefined) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustMkldnnLinear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor) { - - retVal, err := MkldnnLinear(input, weight, bias) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { - - retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor) { - - retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMm(mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Mm(mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MmOut(out, mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMseLoss(target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MseLoss(target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMseLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMseLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MseLossBackwardOut(gradInput, gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMseLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MseLossOut(out, target, reduction, del) - if err != 
nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMul(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Mul(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMul1(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Mul1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMul_(other Tensor)() { - - err := ts.Mul_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustMul1_(other Scalar)() { - - err := ts.Mul1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustMulOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MulOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultiMarginLossBackward(gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultiMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MultiMarginLossBackwardOut(gradInput, gradOutput, target, p, margin, weight, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultilabelMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MultilabelMarginLoss(target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultilabelMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultilabelMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MultilabelMarginLossBackwardOut(gradInput, gradOutput, target, reduction, isTarget, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultilabelMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultinomial(numSamples int64, replacement bool, del bool)(retVal Tensor) { - - retVal, err := ts.Multinomial(numSamples, replacement, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMultinomialOut(out Tensor, numSamples int64, replacement bool, del bool)(retVal Tensor) { - - retVal, err := ts.MultinomialOut(out, numSamples, replacement, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMv(vec Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Mv(vec, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMvOut(out Tensor, vec Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.MvOut(out, vec, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustMvlgamma(p int64, del bool)(retVal Tensor) { - - retVal, err := ts.Mvlgamma(p, del) - if err != nil { log.Fatal(err) } - - return retVal -} - 
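// NOTE (editorial usage sketch, not part of the generated patch): with pointer
// receivers the call sites keep their chained shape; the del flag still decides
// whether the receiver's underlying C tensor is dropped once the call returns.
// This assumes the package is imported as "tensor" and that gotch.Float, gotch.CPU
// and MustDrop are available from the wider gotch API:
//
//	xs := tensor.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU) // xs is a *Tensor
//	ys := xs.MustMul(xs, false)                                  // del=false keeps xs alive
//	zs := ys.MustNeg(true)                                       // del=true drops ys after the call
//	zs.MustDrop()
//	xs.MustDrop()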
-func(ts Tensor) MustMvlgamma_(p int64)() {
-
- err := ts.Mvlgamma_(p)
- if err != nil { log.Fatal(err) }
-
- return
-}
-
-func(ts Tensor) MustNarrow(dim int64, start int64, length int64, del bool)(retVal Tensor) {
-
- retVal, err := ts.Narrow(dim, start, length, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNarrow1(dim int64, start Tensor, length int64, del bool)(retVal Tensor) {
-
- retVal, err := ts.Narrow1(dim, start, length, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool)(retVal Tensor) {
-
- retVal, err := ts.NarrowCopy(dim, start, length, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNativeNorm(del bool)(retVal Tensor) {
-
- retVal, err := ts.NativeNorm(del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNe(other Scalar, del bool)(retVal Tensor) {
-
- retVal, err := ts.Ne(other, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNe1(other Tensor, del bool)(retVal Tensor) {
-
- retVal, err := ts.Ne1(other, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNe_(other Scalar)() {
-
- err := ts.Ne_(other)
- if err != nil { log.Fatal(err) }
-
- return
-}
-
-func(ts Tensor) MustNe1_(other Tensor)() {
-
- err := ts.Ne1_(other)
- if err != nil { log.Fatal(err) }
-
- return
-}
-
-func(ts Tensor) MustNeOut(out Tensor, other Scalar, del bool)(retVal Tensor) {
-
- retVal, err := ts.NeOut(out, other, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNeOut1(out Tensor, other Tensor, del bool)(retVal Tensor) {
-
- retVal, err := ts.NeOut1(out, other, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNeg(del bool)(retVal Tensor) {
-
- retVal, err := ts.Neg(del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNeg_()() {
-
- err := ts.Neg_()
- if err != nil { log.Fatal(err) }
-
- return
-}
-
-func(ts Tensor) MustNegOut(out Tensor, del bool)(retVal Tensor) {
-
- retVal, err := ts.NegOut(out, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor) {
-
- retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNewFull(size []int64, fillValue Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor) {
-
- retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor) {
-
- retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNllLoss(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) {
-
- retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustNllLoss2d(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) {
-
- retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts
Tensor) MustNllLoss2dBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNllLoss2dBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.NllLoss2dBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNllLoss2dOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) { - - retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNllLossBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNllLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.NllLossBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNllLossOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) { - - retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNonzero(del bool)(retVal Tensor) { - - retVal, err := ts.Nonzero(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNonzeroOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.NonzeroOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNorm(del bool)(retVal Tensor) { - - retVal, err := ts.Norm(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNorm1(p Scalar, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Norm1(p, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNorm2(p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.Norm2(p, dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNorm3(p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Norm3(p, dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustNormExceptDim(v Tensor, pow int64, dim int64)(retVal Tensor) { - - retVal, err := NormExceptDim(v, pow, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNormOut(out Tensor, p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.NormOut(out, p, dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNormOut1(out Tensor, p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal 
Tensor) { - - retVal, err := ts.NormOut1(out, p, dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNormal_(mean float64, std float64)() { - - err := ts.Normal_(mean, std) - if err != nil { log.Fatal(err) } - - return -} - -func MustNormalOut(out Tensor, mean Tensor, std float64)(retVal Tensor) { - - retVal, err := NormalOut(out, mean, std) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustNormalOut1(out Tensor, mean float64, std Tensor)(retVal Tensor) { - - retVal, err := NormalOut1(out, mean, std) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustNormalOut2(out Tensor, mean Tensor, std Tensor)(retVal Tensor) { - - retVal, err := NormalOut2(out, mean, std) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustNormalOut3(out Tensor, mean float64, std float64, size []int64)(retVal Tensor) { - - retVal, err := NormalOut3(out, mean, std, size) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNuclearNorm(keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.NuclearNorm(keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNuclearNorm1(dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.NuclearNorm1(dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNuclearNormOut(out Tensor, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.NuclearNormOut(out, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNuclearNormOut1(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.NuclearNormOut1(out, dim, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustNumpyT(del bool)(retVal Tensor) { - - retVal, err := ts.NumpyT(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustOneHot(numClasses int64, del bool)(retVal Tensor) { - - retVal, err := ts.OneHot(numClasses, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Ones(size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustOnesLike(del bool)(retVal Tensor) { - - retVal, err := ts.OnesLike(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustOnesOut(out Tensor, size []int64)(retVal Tensor) { - - retVal, err := OnesOut(out, size) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustOrgqr(input2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Orgqr(input2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustOrgqrOut(out Tensor, input2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.OrgqrOut(out, input2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustOrmqr(input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor) { - - retVal, err := ts.Ormqr(input2, input3, left, transpose, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustOrmqrOut(out Tensor, input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor) { - - retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustPairwiseDistance(x1 
Tensor, x2 Tensor, p float64, eps float64, keepdim bool)(retVal Tensor) { - - retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPdist(p float64, del bool)(retVal Tensor) { - - retVal, err := ts.Pdist(p, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPermute(dims []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Permute(dims, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPinMemory(del bool)(retVal Tensor) { - - retVal, err := ts.PinMemory(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPinverse(rcond float64, del bool)(retVal Tensor) { - - retVal, err := ts.Pinverse(rcond, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPixelShuffle(upscaleFactor int64, del bool)(retVal Tensor) { - - retVal, err := ts.PixelShuffle(upscaleFactor, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPoisson(del bool)(retVal Tensor) { - - retVal, err := ts.Poisson(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustPoissonNllLoss(input Tensor, target Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal Tensor) { - - retVal, err := PoissonNllLoss(input, target, logInput, full, eps, reduction) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPolygamma(n int64, del bool)(retVal Tensor) { - - retVal, err := ts.Polygamma(n, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPolygamma_(n int64)() { - - err := ts.Polygamma_(n) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustPolygammaOut(out Tensor, n int64, del bool)(retVal Tensor) { - - retVal, err := ts.PolygammaOut(out, n, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPow(exponent Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Pow(exponent, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPow1(exponent Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Pow1(exponent, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustPow2(selfScalar Scalar, exponent Tensor)(retVal Tensor) { - - retVal, err := Pow2(selfScalar, exponent) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPow_(exponent Scalar)() { - - err := ts.Pow_(exponent) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustPow1_(exponent Tensor)() { - - err := ts.Pow1_(exponent) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustPowOut(out Tensor, exponent Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.PowOut(out, exponent, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPowOut1(out Tensor, exponent Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.PowOut1(out, exponent, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustPowOut2(out Tensor, selfScalar Scalar, exponent Tensor)(retVal Tensor) { - - retVal, err := PowOut2(out, selfScalar, exponent) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPrelu(weight Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Prelu(weight, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustProd(dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Prod(dtype, 
del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustProd1(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Prod1(dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustProdOut(out Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.ProdOut(out, dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustPut_(index Tensor, source Tensor, accumulate bool)() { - - err := ts.Put_(index, source, accumulate) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustQPerChannelScales(del bool)(retVal Tensor) { - - retVal, err := ts.QPerChannelScales(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustQPerChannelZeroPoints(del bool)(retVal Tensor) { - - retVal, err := ts.QPerChannelZeroPoints(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustQuantizePerChannel(scales Tensor, zeroPoints Tensor, axis int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustQuantizedBatchNorm(input Tensor, weight Tensor, bias Tensor, mean Tensor, vari Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal Tensor) { - - retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustQuantizedGruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor) { - - retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { - - retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustQuantizedRnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor) { - - retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustQuantizedRnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor) { - - retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, 
scaleHh, zeroPointIh, zeroPointHh) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Rand(size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRandLike(del bool)(retVal Tensor) { - - retVal, err := ts.RandLike(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandOut(out Tensor, size []int64)(retVal Tensor) { - - retVal, err := RandOut(out, size) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Randint(high, size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Randint1(low, high, size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRandintLike(high int64, del bool)(retVal Tensor) { - - retVal, err := ts.RandintLike(high, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRandintLike1(low int64, high int64, del bool)(retVal Tensor) { - - retVal, err := ts.RandintLike1(low, high, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandintOut(out Tensor, high int64, size []int64)(retVal Tensor) { - - retVal, err := RandintOut(out, high, size) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandintOut1(out Tensor, low int64, high int64, size []int64)(retVal Tensor) { - - retVal, err := RandintOut1(out, low, high, size) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Randn(size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRandnLike(del bool)(retVal Tensor) { - - retVal, err := ts.RandnLike(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandnOut(out Tensor, size []int64)(retVal Tensor) { - - retVal, err := RandnOut(out, size) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRandom_()() { - - err := ts.Random_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRandom1_(to int64)() { - - err := ts.Random1_(to) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRandom2(from int64, to int64)() { - - err := ts.Random2(from, to) - if err != nil { log.Fatal(err) } - - return -} - -func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Randperm(n, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRandpermOut(out Tensor, n int64)(retVal Tensor) { - - retVal, err := RandpermOut(out, n) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRange(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Range(start, end, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRange1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := Range1(start, end, optionsKind, optionsDevice) - if err 
!= nil { log.Fatal(err) } - - return retVal -} - -func MustRangeOut(out Tensor, start Scalar, end Scalar)(retVal Tensor) { - - retVal, err := RangeOut(out, start, end) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReal(del bool)(retVal Tensor) { - - retVal, err := ts.Real(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReciprocal(del bool)(retVal Tensor) { - - retVal, err := ts.Reciprocal(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReciprocal_()() { - - err := ts.Reciprocal_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustReciprocalOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ReciprocalOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad1d(padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad1d(padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad1dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad1dOut(out, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad2d(padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad2d(padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad2dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReflectionPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReflectionPad2dOut(out, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRelu(del bool)(retVal Tensor) { - - retVal, err := ts.Relu(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRelu_()() { - - err := ts.Relu_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRemainder(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Remainder(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRemainder1(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Remainder1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRemainder_(other Scalar)() { - - err := ts.Remainder_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRemainder1_(other Tensor)() { - - err := ts.Remainder1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) 
MustRemainderOut(out Tensor, other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.RemainderOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRemainderOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.RemainderOut1(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRenorm(p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Renorm(p, dim, maxnorm, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRenorm_(p Scalar, dim int64, maxnorm Scalar)() { - - err := ts.Renorm_(p, dim, maxnorm) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRenormOut(out Tensor, p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.RenormOut(out, p, dim, maxnorm, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRepeat(repeats []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Repeat(repeats, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRepeatInterleave(repeats Tensor)(retVal Tensor) { - - retVal, err := RepeatInterleave(repeats) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRepeatInterleave1(repeats Tensor, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.RepeatInterleave1(repeats, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRepeatInterleave2(repeats int64, dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.RepeatInterleave2(repeats, dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad1d(padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad1d(padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad1dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad1dOut(out, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad2d(padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad2d(padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad2dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad2dOut(out, padding, del) - if err != nil { log.Fatal(err) } - - return 
retVal -} - -func(ts Tensor) MustReplicationPad3d(padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad3d(padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad3dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad3dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad3dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReplicationPad3dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.ReplicationPad3dOut(out, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRequiresGrad_(requiresGrad bool)() { - - err := ts.RequiresGrad_(requiresGrad) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustReshape(shape []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Reshape(shape, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustReshapeAs(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ReshapeAs(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustResize_(size []int64)() { - - err := ts.Resize_(size) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustResizeAs_(theTemplate Tensor)() { - - err := ts.ResizeAs_(theTemplate) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRfft(signalNdim int64, normalized bool, onesided bool, del bool)(retVal Tensor) { - - retVal, err := ts.Rfft(signalNdim, normalized, onesided, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor) { - - retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustRnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor) { - - retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRoll(shifts []int64, dims []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Roll(shifts, dims, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRot90(k int64, dims []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Rot90(k, dims, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRound(del bool)(retVal Tensor) { - - retVal, err := ts.Round(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRound_()() { - - err := ts.Round_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRoundOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.RoundOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRrelu(training bool, del bool)(retVal Tensor) { - - retVal, err := ts.Rrelu(training, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRrelu_(training bool)() { - - err := ts.Rrelu_(training) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRreluWithNoise(noise Tensor, training bool, del 
bool)(retVal Tensor) { - - retVal, err := ts.RreluWithNoise(noise, training, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRreluWithNoise_(noise Tensor, training bool)() { - - err := ts.RreluWithNoise_(noise, training) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRreluWithNoiseBackward(gradOutput Tensor, noise Tensor, lower Scalar, upper Scalar, training bool, selfIsResult bool, del bool)(retVal Tensor) { - - retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRreluWithNoiseOut(out Tensor, noise Tensor, training bool, del bool)(retVal Tensor) { - - retVal, err := ts.RreluWithNoiseOut(out, noise, training, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRsqrt(del bool)(retVal Tensor) { - - retVal, err := ts.Rsqrt(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRsqrt_()() { - - err := ts.Rsqrt_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustRsqrtOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.RsqrtOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRsub(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Rsub(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustRsub1(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Rsub1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustScalarTensor(s Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := ScalarTensor(s, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustScatter(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Scatter(dim, index, src, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustScatter1(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Scatter1(dim, index, value, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustScatter_(dim int64, index Tensor, src Tensor)() { - - err := ts.Scatter_(dim, index, src) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustScatter1_(dim int64, index Tensor, value Scalar)() { - - err := ts.Scatter1_(dim, index, value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustScatterAdd(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.ScatterAdd(dim, index, src, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustScatterAdd_(dim int64, index Tensor, src Tensor)() { - - err := ts.ScatterAdd_(dim, index, src) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSelect(dim int64, index int64, del bool)(retVal Tensor) { - - retVal, err := ts.Select(dim, index, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSelu(del bool)(retVal Tensor) { - - retVal, err := ts.Selu(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSelu_()() { - - err := ts.Selu_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSet_()() { - - err := ts.Set_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSet1_(source Tensor)() { - - err 
:= ts.Set1_(source) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSetRequiresGrad(r bool, del bool)(retVal Tensor) { - - retVal, err := ts.SetRequiresGrad(r, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSigmoid(del bool)(retVal Tensor) { - - retVal, err := ts.Sigmoid(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSigmoid_()() { - - err := ts.Sigmoid_() - if err != nil { log.Fatal(err) } - - return -} - -func MustSigmoidBackward(gradOutput Tensor, output Tensor)(retVal Tensor) { - - retVal, err := SigmoidBackward(gradOutput, output) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustSigmoidBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor) { - - retVal, err := SigmoidBackwardOut(gradInput, gradOutput, output) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSigmoidOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SigmoidOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSign(del bool)(retVal Tensor) { - - retVal, err := ts.Sign(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSign_()() { - - err := ts.Sign_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSignOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SignOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSin(del bool)(retVal Tensor) { - - retVal, err := ts.Sin(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSin_()() { - - err := ts.Sin_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSinOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SinOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSinh(del bool)(retVal Tensor) { - - retVal, err := ts.Sinh(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSinh_()() { - - err := ts.Sinh_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSinhOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SinhOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlice(dim int64, start int64, end int64, step int64, del bool)(retVal Tensor) { - - retVal, err := ts.Slice(dim, start, end, step, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConv3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConv3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConvDilated2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConvDilated3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, 
padding []int64, dilation []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConvTranspose2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConvTranspose2dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConvTranspose3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSlowConvTranspose3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSmm(mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Smm(mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSmoothL1Loss(target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SmoothL1Loss(target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSmoothL1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSmoothL1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SmoothL1LossBackwardOut(gradInput, gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSmoothL1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SmoothL1LossOut(out, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SoftMarginLoss(target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SoftMarginLossBackwardOut(gradInput, 
gradOutput, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { - - retVal, err := ts.SoftMarginLossOut(out, target, reduction, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Softmax(dim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftplus(del bool)(retVal Tensor) { - - retVal, err := ts.Softplus(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftplusBackward(gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, output, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftplusBackwardOut(gradInput Tensor, gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SoftplusBackwardOut(gradInput, gradOutput, beta, threshold, output, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftplusOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SoftplusOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftshrink(del bool)(retVal Tensor) { - - retVal, err := ts.Softshrink(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftshrinkBackward(gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftshrinkBackwardOut(gradInput Tensor, gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.SoftshrinkBackwardOut(gradInput, gradOutput, lambd, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSoftshrinkOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SoftshrinkOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := SparseCooTensor(size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustSparseCooTensor1(indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := SparseCooTensor1(indices, values, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustSparseCooTensor2(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := SparseCooTensor2(indices, values, size, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSparseMask(mask Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SparseMask(mask, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64)() { - - err := ts.SparseResize_(size, sparseDim, denseDim) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)() { - - err := ts.SparseResizeAndClear_(size, sparseDim, denseDim) - if 
err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSqrt(del bool)(retVal Tensor) { - - retVal, err := ts.Sqrt(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSqrt_()() { - - err := ts.Sqrt_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSqrtOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SqrtOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSquare(del bool)(retVal Tensor) { - - retVal, err := ts.Square(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSquare_()() { - - err := ts.Square_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSqueeze(del bool)(retVal Tensor) { - - retVal, err := ts.Squeeze(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSqueeze1(dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.Squeeze1(dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSqueeze_()() { - - err := ts.Squeeze_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSqueeze1_(dim int64)() { - - err := ts.Squeeze1_(dim) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSspaddmm(mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Sspaddmm(mat1, mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSspaddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.SspaddmmOut(out, mat1, mat2, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustStack(tensors []Tensor, dim int64)(retVal Tensor) { - - retVal, err := Stack(tensors, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustStackOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor) { - - retVal, err := StackOut(out, tensors, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustStd(unbiased bool, del bool)(retVal Tensor) { - - retVal, err := ts.Std(unbiased, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustStd1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.Std1(dim, unbiased, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustStdOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) { - - retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustStft(nFft int64, hopLength int64, winLength int64, window Tensor, normalized bool, onesided bool, del bool)(retVal Tensor) { - - retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSub(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Sub(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSub1(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Sub1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSub_(other Tensor)() { - - err := ts.Sub_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSub1_(other Scalar)() { - - err := ts.Sub1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustSubOut(out Tensor, other Tensor, del bool)(retVal 
Tensor) { - - retVal, err := ts.SubOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSum(dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Sum(dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSum1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Sum1(dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSumOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.SumOut(out, dim, keepdim, dtype, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustSumToSize(size []int64, del bool)(retVal Tensor) { - - retVal, err := ts.SumToSize(size, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustT(del bool)(retVal Tensor) { - - retVal, err := ts.T(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustT_()() { - - err := ts.T_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustTake(index Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.Take(index, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTakeOut(out Tensor, index Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.TakeOut(out, index, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTan(del bool)(retVal Tensor) { - - retVal, err := ts.Tan(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTan_()() { - - err := ts.Tan_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustTanOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.TanOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTanh(del bool)(retVal Tensor) { - - retVal, err := ts.Tanh(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTanh_()() { - - err := ts.Tanh_() - if err != nil { log.Fatal(err) } - - return -} - -func MustTanhBackward(gradOutput Tensor, output Tensor)(retVal Tensor) { - - retVal, err := TanhBackward(gradOutput, output) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustTanhBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor) { - - retVal, err := TanhBackwardOut(gradInput, gradOutput, output) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTanhOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.TanhOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTensordot(other Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal Tensor) { - - retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustThreshold(threshold Scalar, value Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.Threshold(threshold, value, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustThreshold_(threshold Scalar, value Scalar)() { - - err := ts.Threshold_(threshold, value) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustThresholdBackward(gradOutput Tensor, threshold Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.ThresholdBackward(gradOutput, threshold, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) 
MustThresholdOut(out Tensor, threshold Scalar, value Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.ThresholdOut(out, threshold, value, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTo(device gotch.Device, del bool)(retVal Tensor) { - - retVal, err := ts.To(device, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTo1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal Tensor) { - - retVal, err := ts.To1(optionsKind, optionsDevice, nonBlocking, copy, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTo2(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor) { - - retVal, err := ts.To2(dtype, nonBlocking, copy, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTo3(other Tensor, nonBlocking bool, copy bool, del bool)(retVal Tensor) { - - retVal, err := ts.To3(other, nonBlocking, copy, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTo4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor) { - - retVal, err := ts.To4(device, dtype, nonBlocking, copy, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustToDense(del bool)(retVal Tensor) { - - retVal, err := ts.ToDense(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustToDenseBackward(grad Tensor, input Tensor)(retVal Tensor) { - - retVal, err := ToDenseBackward(grad, input) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustToMkldnn(del bool)(retVal Tensor) { - - retVal, err := ts.ToMkldnn(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustToMkldnnBackward(grad Tensor, input Tensor)(retVal Tensor) { - - retVal, err := ToMkldnnBackward(grad, input) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustToSparse(del bool)(retVal Tensor) { - - retVal, err := ts.ToSparse(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustToSparse1(sparseDim int64, del bool)(retVal Tensor) { - - retVal, err := ts.ToSparse1(sparseDim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTotype(scalarType gotch.DType, del bool)(retVal Tensor) { - - retVal, err := ts.Totype(scalarType, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTrace(del bool)(retVal Tensor) { - - retVal, err := ts.Trace(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTranspose(dim0 int64, dim1 int64, del bool)(retVal Tensor) { - - retVal, err := ts.Transpose(dim0, dim1, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTranspose_(dim0 int64, dim1 int64)() { - - err := ts.Transpose_(dim0, dim1) - if err != nil { log.Fatal(err) } - - return -} - -func MustTrapz(y Tensor, x Tensor, dim int64)(retVal Tensor) { - - retVal, err := Trapz(y, x, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustTrapz1(y Tensor, dx float64, dim int64)(retVal Tensor) { - - retVal, err := Trapz1(y, dx, dim) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTril(diagonal int64, del bool)(retVal Tensor) { - - retVal, err := ts.Tril(diagonal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTril_(diagonal int64)() { - - err := ts.Tril_(diagonal) - if err != nil { 
log.Fatal(err) } - - return -} - -func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTrilOut(out Tensor, diagonal int64, del bool)(retVal Tensor) { - - retVal, err := ts.TrilOut(out, diagonal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func MustTripletMarginLoss(anchor Tensor, positive Tensor, negative Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal Tensor) { - - retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTriu(diagonal int64, del bool)(retVal Tensor) { - - retVal, err := ts.Triu(diagonal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTriu_(diagonal int64)() { - - err := ts.Triu_(diagonal) - if err != nil { log.Fatal(err) } - - return -} - -func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { - - retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTriuOut(out Tensor, diagonal int64, del bool)(retVal Tensor) { - - retVal, err := ts.TriuOut(out, diagonal, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTrueDivide(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.TrueDivide(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTrueDivide1(other Scalar, del bool)(retVal Tensor) { - - retVal, err := ts.TrueDivide1(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTrueDivide_(other Tensor)() { - - err := ts.TrueDivide_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustTrueDivide1_(other Scalar)() { - - err := ts.TrueDivide1_(other) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustTrueDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.TrueDivideOut(out, other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTrunc(del bool)(retVal Tensor) { - - retVal, err := ts.Trunc(del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTrunc_()() { - - err := ts.Trunc_() - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustTruncOut(out Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.TruncOut(out, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustTypeAs(other Tensor, del bool)(retVal Tensor) { - - retVal, err := ts.TypeAs(other, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustUnfold(dimension int64, size int64, step int64, del bool)(retVal Tensor) { - - retVal, err := ts.Unfold(dimension, size, step, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustUniform_(from float64, to float64)() { - - err := ts.Uniform_(from, to) - if err != nil { log.Fatal(err) } - - return -} - -func(ts Tensor) MustUnsqueeze(dim int64, del bool)(retVal Tensor) { - - retVal, err := ts.Unsqueeze(dim, del) - if err != nil { log.Fatal(err) } - - return retVal -} - -func(ts Tensor) MustUnsqueeze_(dim int64)() { - - err := 
ts.Unsqueeze_(dim)
- if err != nil { log.Fatal(err) }
-
- return
-}
-
-func(ts Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleBicubic2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleBicubic2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleBicubic2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleBicubic2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleBilinear2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleBilinear2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleBilinear2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleBilinear2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleLinear1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor) {
-
- retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleLinear1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor) {
-
- retVal, err := UpsampleLinear1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleLinear1dOut(out Tensor, outputSize []int64, alignCorners bool, scales float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleNearest1d(outputSize []int64, scales float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleNearest1d(outputSize, scales, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleNearest1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor) {
-
- retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleNearest1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor) {
-
- retVal, err := UpsampleNearest1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scales)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleNearest1dOut(out Tensor, outputSize []int64, scales float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleNearest2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleNearest2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleNearest2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleNearest2dOut(out Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleNearest3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleNearest3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleNearest3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleNearest3dOut(out Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleTrilinear3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustUpsampleTrilinear3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) {
-
- retVal, err := UpsampleTrilinear3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustUpsampleTrilinear3dOut(out Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) {
-
- retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustValues(del bool)(retVal Tensor) {
-
- retVal, err := ts.Values(del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustVar(unbiased bool, del bool)(retVal Tensor) {
-
- retVal, err := ts.Var(unbiased, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustVar1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) {
-
- retVal, err := ts.Var1(dim, unbiased, keepdim, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustVarOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) {
-
- retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustView(size []int64, del bool)(retVal Tensor) {
-
- retVal, err := ts.View(size, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustViewAs(other Tensor, del bool)(retVal Tensor) {
-
- retVal, err := ts.ViewAs(other, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustWhere1(condition Tensor, other Tensor, del bool)(retVal Tensor) {
-
- retVal, err := ts.Where1(condition, other, del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustZero_()() {
-
- err := ts.Zero_()
- if err != nil { log.Fatal(err) }
-
- return
-}
-
-func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) {
-
- retVal, err := Zeros(size, optionsKind, optionsDevice)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func(ts Tensor) MustZerosLike(del bool)(retVal Tensor) {
-
- retVal, err := ts.ZerosLike(del)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-
-func MustZerosOut(out Tensor, size []int64)(retVal Tensor) {
-
- retVal, err := ZerosOut(out, size)
- if err != nil { log.Fatal(err) }
-
- return retVal
-}
-// End of implementing Tensor =================================
+	err := ts.__And_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__And1(other *Tensor) {
+
+	err := ts.__And1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Iand_(other *Scalar) {
+
+	err := ts.__Iand_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Iand1(other *Tensor) {
+
+	err := ts.__Iand1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Ilshift_(other *Scalar) {
+
+	err := ts.__Ilshift_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Ilshift1(other *Tensor) {
+
+	err := ts.__Ilshift1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Ior_(other *Scalar) {
+
+	err := ts.__Ior_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Ior1(other *Tensor) {
+
+	err := ts.__Ior1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Irshift_(other *Scalar) {
+
+	err := ts.__Irshift_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Irshift1(other *Tensor) {
+
+	err := ts.__Irshift1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Ixor_(other *Scalar) {
+
+	err := ts.__Ixor_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Ixor1(other *Tensor) {
+
+	err := ts.__Ixor1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Lshift_(other *Scalar) {
+
+	err := ts.__Lshift_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Lshift1(other *Tensor) {
+
+	err := ts.__Lshift1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Or_(other *Scalar) {
+
+	err := ts.__Or_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Or1(other *Tensor) {
+
+	err := ts.__Or1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Rshift_(other *Scalar) {
+
+	err := ts.__Rshift_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Rshift1(other *Tensor) {
+
+	err := ts.__Rshift1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Xor_(other *Scalar) {
+
+	err := ts.__Xor_(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must__Xor1(other *Tensor) {
+
+	err := ts.__Xor1(other)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) {
+
+	retVal, err := ts._AdaptiveAvgPool2d(outputSize, del)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return retVal
+}
+
+func (ts *Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor) {
+
+	retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return retVal
+}
+
+func (ts *Tensor) Must_Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) {
+
+	retVal, err := ts._Addr(vec1, vec2, del)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return retVal
+}
+
+func (ts *Tensor) Must_Addr_(vec1 *Tensor, vec2 *Tensor) {
+
+	err := ts._Addr_(vec1, vec2)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	return
+}
+
+func (ts *Tensor) Must_AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)
(retVal *Tensor) { + + retVal, err := ts._AddrOut(out, vec1, vec2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_AmpUpdateScale(growthTracker *Tensor, currentScale *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) (retVal *Tensor) { + + retVal, err := _AmpUpdateScale(growthTracker, currentScale, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor) { + + err := ts._BaddbmmMkl_(batch1, batch2) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) Must_CastByte(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastByte(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CastChar(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastChar(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CastDouble(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastDouble(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CastFloat(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastFloat(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CastHalf(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastHalf(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CastInt(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastInt(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CastLong(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastLong(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CastShort(nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CastShort(nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_Cat(tensors []Tensor, dim int64) (retVal *Tensor) { + + retVal, err := _Cat(tensors, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { + + retVal, err := _CatOut(out, tensors, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor) (retVal *Tensor) { + + retVal, err := _CdistBackward(grad, x1, x2, p, cdist) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CholeskyHelper(upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CholeskyHelper(upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CholeskySolveHelper(a *Tensor, upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CholeskySolveHelper(a, upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_Coalesced_(coalesced bool) { + + err := ts._Coalesced_(coalesced) + if err != nil { + log.Fatal(err) + } + + return +} + +func Must_Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic 
bool, cudnnEnabled bool) (retVal *Tensor) { + + retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64) (retVal *Tensor) { + + retVal, err := _ConvolutionNogroup(input, weight, bias, stride, padding, dilation, transposed, outputPadding) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CopyFrom(dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor) { + + retVal, err := ts._CopyFrom(dst, nonBlocking, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor) { + + retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional bool) (retVal *Tensor) { + + retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, numLayers, batchFirst, bidirectional) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_Cumprod(dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts._Cumprod(dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CumprodOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts._CumprodOut(out, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_Cumsum(dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts._Cumsum(dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_CumsumOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts._CumsumOut(out, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_DimArange(like *Tensor, dim int64) (retVal *Tensor) { + + retVal, err := _DimArange(like, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor) { + + retVal, err := _DirichletGrad(x, alpha, total) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor) (retVal *Tensor) { + + retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func 
Must_EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor) (retVal *Tensor) { + + retVal, err := _EmbeddingBagDenseBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64) (retVal *Tensor) { + + retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor) (retVal *Tensor) { + + retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64) (retVal *Tensor) { + + retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool) (retVal *Tensor) { + + retVal, err := ts._FftWithSize(signalNdim, complexInput, complexOutput, inverse, checkedSignalSizes, normalized, onesided, outputSizes, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts._GatherSparseBackward(dim, index, grad, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_IndexCopy_(dim int64, index *Tensor, source *Tensor) { + + err := ts._IndexCopy_(dim, index, source) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) Must_IndexPutImpl_(indices []Tensor, values *Tensor, accumulate bool, unsafety bool) { + + err := ts._IndexPutImpl_(indices, values, accumulate, unsafety) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) Must_Indices(del bool) (retVal *Tensor) { + + retVal, err := ts._Indices(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_InverseHelper(del bool) (retVal *Tensor) { + + retVal, err := ts._InverseHelper(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { + + retVal, err := ts._LogSoftmax(dim, halfToFloat, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_LogSoftmaxBackwardData(gradOutput 
*Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts._LogSoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_LuSolveHelper(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts._LuSolveHelper(lUData, lUPivots, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool) (retVal *Tensor) { + + retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool) (retVal *Tensor) { + + retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_MaskedScale(mask *Tensor, scale float64, del bool) (retVal *Tensor) { + + retVal, err := ts._MaskedScale(mask, scale, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_MkldnnReshape(shape []int64, del bool) (retVal *Tensor) { + + retVal, err := ts._MkldnnReshape(shape, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor) { + + retVal, err := ts._MkldnnTranspose(dim0, dim1, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64) { + + err := ts._MkldnnTranspose_(dim0, dim1) + if err != nil { + log.Fatal(err) + } + + return +} + +func Must_MultinomialAliasDraw(j *Tensor, q *Tensor, numSamples int64) (retVal *Tensor) { + + retVal, err := _MultinomialAliasDraw(j, q, numSamples) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64) (retVal *Tensor) { + + retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_NnpackSpatialConvolutionBackwardInput(input *Tensor, gradOutput *Tensor, weight *Tensor, padding []int64) (retVal *Tensor) { + + retVal, err := _NnpackSpatialConvolutionBackwardInput(input, gradOutput, weight, padding) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize []int64, gradOutput *Tensor, padding []int64) (retVal *Tensor) { + + retVal, err := _NnpackSpatialConvolutionBackwardWeight(input, weightsize, gradOutput, padding) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool) (retVal *Tensor) { + + retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts._PdistBackward(grad, p, pdist, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_ReshapeFromTensor(shape *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts._ReshapeFromTensor(shape, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) 
Must_SWhere(condition *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts._SWhere(condition, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SampleDirichlet(del bool) (retVal *Tensor) { + + retVal, err := ts._SampleDirichlet(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_ShapeAsTensor(del bool) (retVal *Tensor) { + + retVal, err := ts._ShapeAsTensor(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64) { + + err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) Must_SobolEngineInitializeState_(dimension int64) { + + err := ts._SobolEngineInitializeState_(dimension) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) Must_SobolEngineScramble_(ltm *Tensor, dimension int64) { + + err := ts._SobolEngineScramble_(ltm, dimension) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { + + retVal, err := ts._Softmax(dim, halfToFloat, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts._SoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SparseAddmm(sparse *Tensor, dense *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts._SparseAddmm(sparse, dense, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor) { + + retVal, err := _SparseMm(sparse, dense) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SparseSum(del bool) (retVal *Tensor) { + + retVal, err := ts._SparseSum(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SparseSum1(dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts._SparseSum1(dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SparseSum2(dim []int64, del bool) (retVal *Tensor) { + + retVal, err := ts._SparseSum2(dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) 
Must_SparseSum3(dim []int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts._SparseSum3(dim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_SparseSumBackward(grad *Tensor, dim []int64, del bool) (retVal *Tensor) { + + retVal, err := ts._SparseSumBackward(grad, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_StandardGamma(del bool) (retVal *Tensor) { + + retVal, err := ts._StandardGamma(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_StandardGammaGrad(output *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts._StandardGammaGrad(output, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_Std(unbiased bool, del bool) (retVal *Tensor) { + + retVal, err := ts._Std(unbiased, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64) (retVal *Tensor) { + + retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_UnsafeView(size []int64, del bool) (retVal *Tensor) { + + retVal, err := ts._UnsafeView(size, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_Values(del bool) (retVal *Tensor) { + + retVal, err := ts._Values(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) Must_Var(unbiased bool, del bool) (retVal *Tensor) { + + retVal, err := ts._Var(unbiased, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func Must_WeightNorm(v *Tensor, g *Tensor, dim int64) (retVal *Tensor) { + + retVal, err := _WeightNorm(v, g, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAbs(del bool) (retVal *Tensor) { + + retVal, err := ts.Abs(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAbs_() { + + err := ts.Abs_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAbsOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AbsOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAcos(del bool) (retVal *Tensor) { + + retVal, err := ts.Acos(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAcos_() { + + err := ts.Acos_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAcosOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AcosOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool1d(outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool2d(outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool) 
(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3d(outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dBackward(gradOutput, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveAvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dBackwardOut(gradInput, gradOutput, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool2dBackwardOut(gradInput, gradOutput, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdaptiveMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool3dBackwardOut(gradInput, gradOutput, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdd(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Add(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdd1(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Add1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAdd_(other *Tensor) { + + err := ts.Add_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAdd1_(other *Scalar) { + + err := ts.Add1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAddOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AddOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Addbmm(batch1, batch2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddbmm_(batch1 *Tensor, batch2 *Tensor) { + + err := ts.Addbmm_(batch1, batch2) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AddbmmOut(out, batch1, batch2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Addcdiv(tensor1, tensor2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) 
MustAddcdiv_(tensor1 *Tensor, tensor2 *Tensor) { + + err := ts.Addcdiv_(tensor1, tensor2) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddcmul(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Addcmul(tensor1, tensor2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddcmul_(tensor1 *Tensor, tensor2 *Tensor) { + + err := ts.Addcmul_(tensor1, tensor2) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Addmm(mat1, mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddmm_(mat1 *Tensor, mat2 *Tensor) { + + err := ts.Addmm_(mat1, mat2) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AddmmOut(out, mat1, mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddmv(mat *Tensor, vec *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Addmv(mat, vec, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddmv_(mat *Tensor, vec *Tensor) { + + err := ts.Addmv_(mat, vec) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AddmvOut(out, mat, vec, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Addr(vec1, vec2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAddr_(vec1 *Tensor, vec2 *Tensor) { + + err := ts.Addr_(vec1, vec2) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AddrOut(out, vec1, vec2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustAffineGridGenerator(theta *Tensor, size []int64, alignCorners bool) (retVal *Tensor) { + + retVal, err := AffineGridGenerator(theta, size, alignCorners) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustAffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool) (retVal *Tensor) { + + retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAlias(del bool) (retVal *Tensor) { + + retVal, err := ts.Alias(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAlignAs(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AlignAs(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAll(del bool) (retVal *Tensor) { + + retVal, err := ts.All(del) + if err != nil { + 
log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAll1(dim int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.All1(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.AllOut(out, dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { + + retVal, err := AlphaDropout(input, p, train) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAlphaDropout_(p float64, train bool) { + + err := ts.AlphaDropout_(p, train) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAngle(del bool) (retVal *Tensor) { + + retVal, err := ts.Angle(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAngleOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AngleOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAny(del bool) (retVal *Tensor) { + + retVal, err := ts.Any(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAny1(dim int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Any1(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAnyOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.AnyOut(out, dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustArange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Arange(end, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustArange1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Arange1(start, end, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustArange2(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Arange2(start, end, step, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustArangeOut(out *Tensor, end *Scalar) (retVal *Tensor) { + + retVal, err := ArangeOut(out, end) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustArangeOut1(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor) { + + retVal, err := ArangeOut1(out, start, end) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustArgmax(dim int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Argmax(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustArgmin(dim int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Argmin(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustArgsort(dim int64, descending bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Argsort(dim, descending, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAsStrided(size []int64, stride []int64, storageOffset int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AsStrided(size, stride, storageOffset, del) + if err != nil { + 
log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset int64) { + + err := ts.AsStrided_(size, stride, storageOffset) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAsin(del bool) (retVal *Tensor) { + + retVal, err := ts.Asin(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAsin_() { + + err := ts.Asin_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAsinOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AsinOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAtan(del bool) (retVal *Tensor) { + + retVal, err := ts.Atan(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAtan2(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Atan2(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAtan2_(other *Tensor) { + + err := ts.Atan2_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAtan2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Atan2Out(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAtan_() { + + err := ts.Atan_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustAtanOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.AtanOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool2dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, 
ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool3dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustAvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { + + retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBaddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Baddbmm(batch1, batch2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBaddbmm_(batch1 *Tensor, batch2 *Tensor) { + + err := ts.Baddbmm_(batch1, batch2) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BaddbmmOut(out, batch1, batch2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := BartlettWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor) { + + retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor) (retVal *Tensor) { + + retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor) { + + retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func 
MustBatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor) { + + retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBernoulli(del bool) (retVal *Tensor) { + + retVal, err := ts.Bernoulli(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBernoulli1(p float64, del bool) (retVal *Tensor) { + + retVal, err := ts.Bernoulli1(p, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBernoulli_(p *Tensor) { + + err := ts.Bernoulli_(p) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBernoulli1_(p float64) { + + err := ts.Bernoulli1_(p) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBernoulliOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BernoulliOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { + + retVal, err := Bilinear(input1, input2, weight, bias) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBinaryCrossEntropyBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyBackwardOut(gradInput, gradOutput, target, weight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBincount(weights *Tensor, minlength int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Bincount(weights, minlength, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseAnd(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseAnd(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) 
MustBitwiseAnd1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseAnd1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseAnd_(other *Scalar) { + + err := ts.BitwiseAnd_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBitwiseAnd1_(other *Tensor) { + + err := ts.BitwiseAnd1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBitwiseAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseAndOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseAndOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseAndOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseNot(del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseNot(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseNot_() { + + err := ts.BitwiseNot_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBitwiseNotOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseNotOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseOr(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseOr(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseOr1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseOr1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseOr_(other *Scalar) { + + err := ts.BitwiseOr_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBitwiseOr1_(other *Tensor) { + + err := ts.BitwiseOr1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBitwiseOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseOrOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseOrOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseOrOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseXor(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseXor(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseXor1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseXor1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseXor_(other *Scalar) { + + err := ts.BitwiseXor_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBitwiseXor1_(other *Tensor) { + + err := ts.BitwiseXor1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustBitwiseXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseXorOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBitwiseXorOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.BitwiseXorOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, 
optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustBlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := BlackmanWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBmm(mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Bmm(mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustBmmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.BmmOut(out, mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCartesianProd(tensors []Tensor) (retVal *Tensor) { + + retVal, err := CartesianProd(tensors) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCat(tensors []Tensor, dim int64) (retVal *Tensor) { + + retVal, err := Cat(tensors, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { + + retVal, err := CatOut(out, tensors, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCauchy_(median float64, sigma float64) { + + err := ts.Cauchy_(median, sigma) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustCdist(x1 *Tensor, x2 *Tensor, p float64, computeMode int64) (retVal *Tensor) { + + retVal, err := Cdist(x1, x2, p, computeMode) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCeil(del bool) (retVal *Tensor) { + + retVal, err := ts.Ceil(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCeil_() { + + err := ts.Ceil_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustCeilOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.CeilOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCelu(del bool) (retVal *Tensor) { + + retVal, err := ts.Celu(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCelu_() { + + err := ts.Celu_() + if err != nil { + log.Fatal(err) + } + + return +} + +func MustChainMatmul(matrices []Tensor) (retVal *Tensor) { + + retVal, err := ChainMatmul(matrices) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCholesky(upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Cholesky(upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCholeskyInverse(upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CholeskyInverse(upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCholeskyInverseOut(out *Tensor, upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CholeskyInverseOut(out, upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CholeskyOut(out, upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCholeskySolve(input2 *Tensor, upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CholeskySolve(input2, upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} 
+ +func (ts *Tensor) MustCholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CholeskySolveOut(out, input2, upper, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustClamp(min *Scalar, max *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Clamp(min, max, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustClamp_(min *Scalar, max *Scalar) { + + err := ts.Clamp_(min, max) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustClampMax(max *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.ClampMax(max, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustClampMax_(max *Scalar) { + + err := ts.ClampMax_(max) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustClampMaxOut(out *Tensor, max *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.ClampMaxOut(out, max, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustClampMin(min *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.ClampMin(min, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustClampMin_(min *Scalar) { + + err := ts.ClampMin_(min) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustClampMinOut(out *Tensor, min *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.ClampMinOut(out, min, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustClampOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.ClampOut(out, min, max, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCoalesce(del bool) (retVal *Tensor) { + + retVal, err := ts.Coalesce(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCol2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { + + retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCol2imBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { + + retVal, err := Col2imBackwardOut(gradInput, gradOutput, kernelSize, dilation, padding, stride) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCol2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCombinations(r int64, withReplacement bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Combinations(r, withReplacement, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustConj(del bool) (retVal *Tensor) { + + retVal, err := ts.Conj(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + 
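+// Note on the *Out variants (an illustrative reading of the generated
+// signatures, not authoritative): they write into a caller-supplied out
+// *Tensor and also return a *Tensor, presumably mirroring the corresponding
+// libtorch out= overloads. Hypothetical example, assuming xs and dst are
+// *Tensor values and maxVal is a *Scalar:
+//
+//	res := xs.MustClampMaxOut(dst, maxVal, false) // result written into dst
+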
+func (ts *Tensor) MustConjOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ConjOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustConstantPadNd(pad []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ConstantPadNd(pad, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustContiguous(del bool) (retVal *Tensor) { + + retVal, err := ts.Contiguous(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { + + retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { + + retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { + + retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ConvTbc(weight, bias, pad, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { + + retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { + + retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { + + retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConvolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor) { + + retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor) { + + retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCopySparseToSparse_(src *Tensor, nonBlocking bool) { + + err := ts.CopySparseToSparse_(src, nonBlocking) + if err != nil { 
+ log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustCos(del bool) (retVal *Tensor) { + + retVal, err := ts.Cos(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCos_() { + + err := ts.Cos_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustCosOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.CosOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCosh(del bool) (retVal *Tensor) { + + retVal, err := ts.Cosh(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCosh_() { + + err := ts.Cosh_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustCoshOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.CoshOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor) { + + retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64) (retVal *Tensor) { + + retVal, err := CosineSimilarity(x1, x2, dim, eps) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCross(other *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Cross(other, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCrossOut(out *Tensor, other *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.CrossOut(out, other, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor) { + + retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCtcLoss1(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor) { + + retVal, err := CtcLoss1(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor) { + + retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor) { + + retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCudnnConvolution1(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CudnnConvolution1(weight, bias, 
padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { + + retVal, err := CudnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCudnnConvolutionTranspose1(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionTranspose1(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustCudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { + + retVal, err := CudnnConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.CudnnGridSampler(grid, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Cumprod(dim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.CumprodOut(out, dim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Cumsum(dim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustCumsumOut(out *Tensor, dim int64, 
dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.CumsumOut(out, dim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustData(del bool) (retVal *Tensor) { + + retVal, err := ts.Data(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDequantize(del bool) (retVal *Tensor) { + + retVal, err := ts.Dequantize(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDet(del bool) (retVal *Tensor) { + + retVal, err := ts.Det(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDetach(del bool) (retVal *Tensor) { + + retVal, err := ts.Detach(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDetach_() { + + err := ts.Detach_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustDiag(diagonal int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Diag(diagonal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor) { + + retVal, err := ts.DiagEmbed(offset, dim1, dim2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDiagOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { + + retVal, err := ts.DiagOut(out, diagonal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDiagflat(offset int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Diagflat(offset, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Diagonal(offset, dim1, dim2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDigamma(del bool) (retVal *Tensor) { + + retVal, err := ts.Digamma(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDigamma_() { + + err := ts.Digamma_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustDigammaOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.DigammaOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDist(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Dist(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDiv(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Div(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDiv1(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Div1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDiv_(other *Tensor) { + + err := ts.Div_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustDiv1_(other *Scalar) { + + err := ts.Div1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustDivOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.DivOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDot(tensor *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Dot(tensor, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDotOut(out *Tensor, tensor *Tensor, del bool) 
(retVal *Tensor) { + + retVal, err := ts.DotOut(out, tensor, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { + + retVal, err := Dropout(input, p, train) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustDropout_(p float64, train bool) { + + err := ts.Dropout_(p, train) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustEinsum(equation string, tensors []Tensor) (retVal *Tensor) { + + retVal, err := Einsum(equation, tensors) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustElu(del bool) (retVal *Tensor) { + + retVal, err := ts.Elu(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustElu_() { + + err := ts.Elu_() + if err != nil { + log.Fatal(err) + } + + return +} + +func MustEluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor) (retVal *Tensor) { + + retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, output) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEluBackwardOut(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor) (retVal *Tensor) { + + retVal, err := EluBackwardOut(gradInput, gradOutput, alpha, scale, inputScale, output) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustEluOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.EluOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEmbedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor) { + + retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor) { + + retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor) { + + retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustEmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64) { + + err := ts.EmbeddingRenorm_(indices, maxNorm, normType) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustEmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor) { + + retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Empty(size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustEmptyLike(del bool) (retVal *Tensor) { + + retVal, err := ts.EmptyLike(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEmptyOut(out *Tensor, size []int64) (retVal *Tensor) { + + retVal, err := EmptyOut(out, size) + if err != nil { + 
log.Fatal(err) + } + + return retVal +} + +func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustEq(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Eq(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustEq1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Eq1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustEq_(other *Scalar) { + + err := ts.Eq_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustEq1_(other *Tensor) { + + err := ts.Eq1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustEqOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.EqOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustEqOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.EqOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustErf(del bool) (retVal *Tensor) { + + retVal, err := ts.Erf(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustErf_() { + + err := ts.Erf_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustErfOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ErfOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustErfc(del bool) (retVal *Tensor) { + + retVal, err := ts.Erfc(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustErfc_() { + + err := ts.Erfc_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustErfcOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ErfcOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustErfinv(del bool) (retVal *Tensor) { + + retVal, err := ts.Erfinv(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustErfinv_() { + + err := ts.Erfinv_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustErfinvOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ErfinvOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustExp(del bool) (retVal *Tensor) { + + retVal, err := ts.Exp(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustExp_() { + + err := ts.Exp_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustExpOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ExpOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustExpand(size []int64, implicit bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Expand(size, implicit, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustExpandAs(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ExpandAs(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustExpm1(del bool) (retVal *Tensor) { + + retVal, err := ts.Expm1(del) + if err != nil { + log.Fatal(err) + } + + return retVal 
+} + +func (ts *Tensor) MustExpm1_() { + + err := ts.Expm1_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustExpm1Out(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Expm1Out(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustExponential_(lambd float64) { + + err := ts.Exponential_(lambd) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Eye(n, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEye1(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Eye1(n, m, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEyeOut(out *Tensor, n int64) (retVal *Tensor) { + + retVal, err := EyeOut(out, n) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustEyeOut1(out *Tensor, n int64, m int64) (retVal *Tensor) { + + retVal, err := EyeOut1(out, n, m) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFakeQuantizePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFakeQuantizePerTensorAffineBackward(grad *Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor) { + + retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor) { + + retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor) { + + retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor) { + + 
retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFbgemmPackGemmMatrixFp16(input *Tensor) (retVal *Tensor) { + + retVal, err := FbgemmPackGemmMatrixFp16(input) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFbgemmPackQuantizedMatrix(input *Tensor) (retVal *Tensor) { + + retVal, err := FbgemmPackQuantizedMatrix(input) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFbgemmPackQuantizedMatrix1(input *Tensor, k int64, n int64) (retVal *Tensor) { + + retVal, err := FbgemmPackQuantizedMatrix1(input, k, n) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFeatureAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { + + retVal, err := FeatureAlphaDropout(input, p, train) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFeatureAlphaDropout_(p float64, train bool) { + + err := ts.FeatureAlphaDropout_(p, train) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustFeatureDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { + + retVal, err := FeatureDropout(input, p, train) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFeatureDropout_(p float64, train bool) { + + err := ts.FeatureDropout_(p, train) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFft(signalNdim int64, normalized bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Fft(signalNdim, normalized, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFill_(value *Scalar) { + + err := ts.Fill_(value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFill1_(value *Tensor) { + + err := ts.Fill1_(value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFillDiagonal_(fillValue *Scalar, wrap bool) { + + err := ts.FillDiagonal_(fillValue, wrap) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFlatten(startDim int64, endDim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Flatten(startDim, endDim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFlip(dims []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Flip(dims, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFloor(del bool) (retVal *Tensor) { + + retVal, err := ts.Floor(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFloor_() { + + err := ts.Floor_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFloorDivide(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FloorDivide(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFloorDivide1(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.FloorDivide1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFloorDivide_(other *Tensor) { + + err := ts.FloorDivide_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFloorDivide1_(other *Scalar) { + + err := ts.FloorDivide1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFloorDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := 
ts.FloorDivideOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFloorOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FloorOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFmod(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Fmod(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFmod1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Fmod1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFmod_(other *Scalar) { + + err := ts.Fmod_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFmod1_(other *Tensor) { + + err := ts.Fmod1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFmodOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.FmodOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFmodOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FmodOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFrac(del bool) (retVal *Tensor) { + + retVal, err := ts.Frac(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFrac_() { + + err := ts.Frac_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustFracOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FracOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFractionalMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFractionalMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFrobeniusNorm(del bool) (retVal *Tensor) { + + retVal, err := ts.FrobeniusNorm(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFrobeniusNorm1(dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.FrobeniusNorm1(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + 
retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Full(size, fillValue, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustFullLike(fillValue *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.FullLike(fillValue, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustFullOut(out *Tensor, size []int64, fillValue *Scalar) (retVal *Tensor) { + + retVal, err := FullOut(out, size, fillValue) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGather(dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Gather(dim, index, sparseGrad, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor) { + + retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGe(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Ge(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGe1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Ge1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGe_(other *Scalar) { + + err := ts.Ge_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustGe1_(other *Tensor) { + + err := ts.Ge1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustGeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.GeOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.GeOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGelu(del bool) (retVal *Tensor) { + + retVal, err := ts.Gelu(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGeluBackward(grad *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.GeluBackward(grad, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGeometric_(p float64) { + + err := ts.Geometric_(p) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustGer(vec2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Ger(vec2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGerOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.GerOut(out, vec2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGlu(dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Glu(dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGluBackward(gradOutput *Tensor, dim int64, del bool) (retVal 
*Tensor) { + + retVal, err := ts.GluBackward(gradOutput, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGluBackwardOut(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.GluBackwardOut(gradInput, gradOutput, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGluOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.GluOut(out, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGrad(del bool) (retVal *Tensor) { + + retVal, err := ts.Grad(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustGridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) { + + retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustGridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) { + + retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustGridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) { + + retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustGroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool) (retVal *Tensor) { + + retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { + + retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGt(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Gt(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGt1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Gt1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGt_(other *Scalar) { + + err := ts.Gt_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustGt1_(other *Tensor) { + + err := ts.Gt1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustGtOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.GtOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustGtOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.GtOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustHammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := HammingWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != 
nil { + log.Fatal(err) + } + + return retVal +} + +func MustHammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := HammingWindow2(windowLength, periodic, alpha, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustHammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := HammingWindow3(windowLength, periodic, alpha, beta, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := HannWindow(windowLength, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustHannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := HannWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardshrink(del bool) (retVal *Tensor) { + + retVal, err := ts.Hardshrink(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.HardshrinkBackward(gradOut, lambd, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardsigmoid(del bool) (retVal *Tensor) { + + retVal, err := ts.Hardsigmoid(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardsigmoid_() { + + err := ts.Hardsigmoid_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustHardsigmoidBackward(gradOutput *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.HardsigmoidBackward(gradOutput, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardsigmoidOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.HardsigmoidOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardtanh(del bool) (retVal *Tensor) { + + retVal, err := ts.Hardtanh(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardtanh_() { + + err := ts.Hardtanh_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustHardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardtanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.HardtanhBackwardOut(gradInput, gradOutput, minVal, maxVal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHardtanhOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.HardtanhOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts 
*Tensor) MustHistc(bins int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Histc(bins, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool) (retVal *Tensor) { + + retVal, err := ts.HistcOut(out, bins, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustHspmm(mat1 *Tensor, mat2 *Tensor) (retVal *Tensor) { + + retVal, err := Hspmm(mat1, mat2) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustHspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor) (retVal *Tensor) { + + retVal, err := HspmmOut(out, mat1, mat2) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIfft(signalNdim int64, normalized bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Ifft(signalNdim, normalized, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustIm2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { + + retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustIm2colBackwardOut(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { + + retVal, err := Im2colBackwardOut(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIm2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustImag(del bool) (retVal *Tensor) { + + retVal, err := ts.Imag(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndex(indices []Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Index(indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.IndexAdd(dim, index, source, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndexAdd_(dim int64, index *Tensor, source *Tensor) { + + err := ts.IndexAdd_(dim, index, source) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustIndexCopy(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.IndexCopy(dim, index, source, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndexCopy_(dim int64, index *Tensor, source *Tensor) { + + err := ts.IndexCopy_(dim, index, source) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustIndexFill(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.IndexFill(dim, index, value, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndexFill1(dim int64, index *Tensor, value *Tensor, 
del bool) (retVal *Tensor) { + + retVal, err := ts.IndexFill1(dim, index, value, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndexFill_(dim int64, index *Tensor, value *Scalar) { + + err := ts.IndexFill_(dim, index, value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustIndexFill1_(dim int64, index *Tensor, value *Tensor) { + + err := ts.IndexFill1_(dim, index, value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustIndexPut(indices []Tensor, values *Tensor, accumulate bool, del bool) (retVal *Tensor) { + + retVal, err := ts.IndexPut(indices, values, accumulate, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndexPut_(indices []Tensor, values *Tensor, accumulate bool) { + + err := ts.IndexPut_(indices, values, accumulate) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.IndexSelect(dim, index, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.IndexSelectOut(out, dim, index, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIndices(del bool) (retVal *Tensor) { + + retVal, err := ts.Indices(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustInstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor) { + + retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIntRepr(del bool) (retVal *Tensor) { + + retVal, err := ts.IntRepr(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustInverse(del bool) (retVal *Tensor) { + + retVal, err := ts.Inverse(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustInverseOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.InverseOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIrfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Irfft(signalNdim, normalized, onesided, signalSizes, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIsclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Isclose(other, rtol, atol, equalNan, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIsfinite(del bool) (retVal *Tensor) { + + retVal, err := ts.Isfinite(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIsinf(del bool) (retVal *Tensor) { + + retVal, err := ts.Isinf(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustIsnan(del bool) (retVal *Tensor) { + + retVal, err := ts.Isnan(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustKlDiv(target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.KlDiv(target, reduction, del) + if err != nil { + 
log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustKlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.KlDivBackward(gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustL1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.L1Loss(target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.L1LossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.L1LossOut(out, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool) (retVal *Tensor) { + + retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLe(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Le(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLe1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Le1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLe_(other *Scalar) { + + err := ts.Le_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLe1_(other *Tensor) { + + err := ts.Le1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.LeOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LeOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLeakyRelu(del bool) (retVal *Tensor) { + + retVal, err := ts.LeakyRelu(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLeakyRelu_() { + + err := ts.LeakyRelu_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor) { + + retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLeakyReluOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LeakyReluOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLerp(end *Tensor, weight *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Lerp(end, weight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLerp1(end *Tensor, weight *Tensor, del bool) 
(retVal *Tensor) { + + retVal, err := ts.Lerp1(end, weight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLerp_(end *Tensor, weight *Scalar) { + + err := ts.Lerp_(end, weight) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLerp1_(end *Tensor, weight *Tensor) { + + err := ts.Lerp1_(end, weight) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLerpOut(out *Tensor, end *Tensor, weight *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.LerpOut(out, end, weight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLerpOut1(out *Tensor, end *Tensor, weight *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LerpOut1(out, end, weight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLgamma(del bool) (retVal *Tensor) { + + retVal, err := ts.Lgamma(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLgamma_() { + + err := ts.Lgamma_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLgammaOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LgammaOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustLinear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { + + retVal, err := Linear(input, weight, bias) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustLinspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustLinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64) (retVal *Tensor) { + + retVal, err := LinspaceOut(out, start, end, steps) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog(del bool) (retVal *Tensor) { + + retVal, err := ts.Log(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog10(del bool) (retVal *Tensor) { + + retVal, err := ts.Log10(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog10_() { + + err := ts.Log10_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLog10Out(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Log10Out(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog1p(del bool) (retVal *Tensor) { + + retVal, err := ts.Log1p(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog1p_() { + + err := ts.Log1p_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLog1pOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Log1pOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog2(del bool) (retVal *Tensor) { + + retVal, err := ts.Log2(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog2_() { + + err := ts.Log2_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLog2Out(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Log2Out(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLog_() { + + err := ts.Log_() + if err != nil { + log.Fatal(err) + } + + return +} 
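+
+// Note on the trailing-underscore methods (illustrative only): Must*_
+// variants operate in place on the receiver and return nothing, still
+// calling log.Fatal on error. Assuming xs is a *Tensor:
+//
+//	xs.MustLog_() // xs now holds its element-wise natural log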
+ +func (ts *Tensor) MustLogNormal_(mean float64, std float64) { + + err := ts.LogNormal_(mean, std) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLogOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogSigmoid(del bool) (retVal *Tensor) { + + retVal, err := ts.LogSigmoid(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogSigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogSigmoidBackwardOut(gradInput, gradOutput, buffer, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogSigmoidOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogSigmoidOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.LogSoftmax(dim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogdet(del bool) (retVal *Tensor) { + + retVal, err := ts.Logdet(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalAnd(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalAnd(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalAnd_(other *Tensor) { + + err := ts.LogicalAnd_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLogicalAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalAndOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalNot(del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalNot(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalNot_() { + + err := ts.LogicalNot_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLogicalNotOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalNotOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalOr(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalOr(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalOr_(other *Tensor) { + + err := ts.LogicalOr_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLogicalOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalOrOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalXor(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalXor(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogicalXor_(other *Tensor) { + + err := ts.LogicalXor_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLogicalXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LogicalXorOut(out, other, del) + 
if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustLogspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustLogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64) (retVal *Tensor) { + + retVal, err := LogspaceOut(out, start, end, steps, base) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Logsumexp(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.LogsumexpOut(out, dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLt(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Lt(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLt1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Lt1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLt_(other *Scalar) { + + err := ts.Lt_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLt1_(other *Tensor) { + + err := ts.Lt1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustLtOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.LtOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLtOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LtOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLuSolve(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LuSolve(lUData, lUPivots, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustLuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustMarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor) { + + retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaskedFill(mask *Tensor, value *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.MaskedFill(mask, value, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaskedFill1(mask *Tensor, value *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaskedFill1(mask, value, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaskedFill_(mask *Tensor, value *Scalar) { + + err := ts.MaskedFill_(mask, value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustMaskedFill1_(mask *Tensor, value *Tensor) { + + err := ts.MaskedFill1_(mask, value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustMaskedScatter(mask *Tensor, source *Tensor, del bool) (retVal *Tensor) { + + retVal, err := 
ts.MaskedScatter(mask, source, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaskedScatter_(mask *Tensor, source *Tensor) { + + err := ts.MaskedScatter_(mask, source) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustMaskedSelect(mask *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaskedSelect(mask, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaskedSelectOut(out *Tensor, mask *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaskedSelectOut(out, mask, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMatmul(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Matmul(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MatmulOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMatrixPower(n int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MatrixPower(n, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMatrixRank(symmetric bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MatrixRank(symmetric, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMatrixRank1(tol float64, symmetric bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MatrixRank1(tol, symmetric, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMax(del bool) (retVal *Tensor) { + + retVal, err := ts.Max(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMax1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Max1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxPool2dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func 
(ts *Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxPool3dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool2d(indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool2d(indices, outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dBackward(gradOutput, indices, outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dBackwardOut(gradInput, gradOutput, indices, outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool3d(indices, outputSize, stride, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dBackward(gradOutput, indices, outputSize, stride, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dBackwardOut(gradInput, gradOutput, indices, outputSize, stride, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMaxValues(dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MaxValues(dim, keepdim, del) + if err 
!= nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMean(dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Mean(dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMean1(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Mean1(dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMedian(del bool) (retVal *Tensor) { + + retVal, err := ts.Median(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMin(del bool) (retVal *Tensor) { + + retVal, err := ts.Min(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMin1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Min1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMinOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MinOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMinValues(dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MinValues(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustMiopenConvolutionBackwardBias(gradOutput *Tensor) (retVal *Tensor) { + + retVal, err := MiopenConvolutionBackwardBias(gradOutput) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustMiopenConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { + + retVal, err := MiopenConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMiopenConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MiopenConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustMiopenConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride 
[]int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { + + retVal, err := MiopenConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MiopenConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustMiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { + + retVal, err := MiopenDepthwiseConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MiopenDepthwiseConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustMkldnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool) (retVal *Tensor) { + + retVal, err := MkldnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, biasDefined) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustMkldnnLinear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { + + retVal, err := MkldnnLinear(input, weight, bias) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, 
dilation []int64, groups int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMm(mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Mm(mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MmOut(out, mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMseLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MseLoss(target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMseLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MseLossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMseLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MseLossOut(out, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMul(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Mul(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMul1(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Mul1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMul_(other *Tensor) { + + err := ts.Mul_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustMul1_(other *Scalar) { + + err := ts.Mul1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustMulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MulOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultiMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MultiMarginLossBackwardOut(gradInput, gradOutput, target, p, margin, weight, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultilabelMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLoss(target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del) + if err 
!= nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultilabelMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossBackwardOut(gradInput, gradOutput, target, reduction, isTarget, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultinomial(numSamples int64, replacement bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Multinomial(numSamples, replacement, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool) (retVal *Tensor) { + + retVal, err := ts.MultinomialOut(out, numSamples, replacement, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMv(vec *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Mv(vec, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMvOut(out *Tensor, vec *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.MvOut(out, vec, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMvlgamma(p int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Mvlgamma(p, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustMvlgamma_(p int64) { + + err := ts.Mvlgamma_(p) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustNarrow(dim int64, start int64, length int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Narrow(dim, start, length, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNarrow1(dim int64, start *Tensor, length int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Narrow1(dim, start, length, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool) (retVal *Tensor) { + + retVal, err := ts.NarrowCopy(dim, start, length, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNativeNorm(del bool) (retVal *Tensor) { + + retVal, err := ts.NativeNorm(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNe(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Ne(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNe1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Ne1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNe_(other *Scalar) { + + err := ts.Ne_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustNe1_(other *Tensor) { + + err := ts.Ne1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustNeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.NeOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.NeOut1(out, other, del) + if err != nil { + log.Fatal(err) + } 
+ + return retVal +} + +func (ts *Tensor) MustNeg(del bool) (retVal *Tensor) { + + retVal, err := ts.Neg(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNeg_() { + + err := ts.Neg_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustNegOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.NegOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { + + retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { + + retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { + + retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNllLoss2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLoss2dBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNllLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLossBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} 
+ +func (ts *Tensor) MustNllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { + + retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNonzero(del bool) (retVal *Tensor) { + + retVal, err := ts.Nonzero(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNonzeroOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.NonzeroOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNorm(del bool) (retVal *Tensor) { + + retVal, err := ts.Norm(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNorm1(p *Scalar, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Norm1(p, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNorm2(p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Norm2(p, dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNorm3(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Norm3(p, dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustNormExceptDim(v *Tensor, pow int64, dim int64) (retVal *Tensor) { + + retVal, err := NormExceptDim(v, pow, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.NormOut(out, p, dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNormOut1(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.NormOut1(out, p, dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNormal_(mean float64, std float64) { + + err := ts.Normal_(mean, std) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustNormalOut(out *Tensor, mean *Tensor, std float64) (retVal *Tensor) { + + retVal, err := NormalOut(out, mean, std) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustNormalOut1(out *Tensor, mean float64, std *Tensor) (retVal *Tensor) { + + retVal, err := NormalOut1(out, mean, std) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustNormalOut2(out *Tensor, mean *Tensor, std *Tensor) (retVal *Tensor) { + + retVal, err := NormalOut2(out, mean, std) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustNormalOut3(out *Tensor, mean float64, std float64, size []int64) (retVal *Tensor) { + + retVal, err := NormalOut3(out, mean, std, size) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNuclearNorm(keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.NuclearNorm(keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNuclearNorm1(dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.NuclearNorm1(dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNuclearNormOut(out *Tensor, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.NuclearNormOut(out, keepdim, del) + 
if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNuclearNormOut1(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.NuclearNormOut1(out, dim, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustNumpyT(del bool) (retVal *Tensor) { + + retVal, err := ts.NumpyT(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustOneHot(numClasses int64, del bool) (retVal *Tensor) { + + retVal, err := ts.OneHot(numClasses, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Ones(size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustOnesLike(del bool) (retVal *Tensor) { + + retVal, err := ts.OnesLike(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustOnesOut(out *Tensor, size []int64) (retVal *Tensor) { + + retVal, err := OnesOut(out, size) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustOrgqr(input2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Orgqr(input2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustOrgqrOut(out *Tensor, input2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.OrgqrOut(out, input2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustOrmqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Ormqr(input2, input3, left, transpose, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustOrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor) { + + retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustPairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool) (retVal *Tensor) { + + retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPdist(p float64, del bool) (retVal *Tensor) { + + retVal, err := ts.Pdist(p, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPermute(dims []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Permute(dims, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPinMemory(del bool) (retVal *Tensor) { + + retVal, err := ts.PinMemory(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPinverse(rcond float64, del bool) (retVal *Tensor) { + + retVal, err := ts.Pinverse(rcond, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPixelShuffle(upscaleFactor int64, del bool) (retVal *Tensor) { + + retVal, err := ts.PixelShuffle(upscaleFactor, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPoisson(del bool) (retVal *Tensor) { + + retVal, err := ts.Poisson(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustPoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64) (retVal *Tensor) { + + retVal, err := PoissonNllLoss(input, target, logInput, full, eps, 
reduction) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPolygamma(n int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Polygamma(n, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPolygamma_(n int64) { + + err := ts.Polygamma_(n) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustPolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor) { + + retVal, err := ts.PolygammaOut(out, n, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPow(exponent *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Pow(exponent, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPow1(exponent *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Pow1(exponent, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustPow2(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { + + retVal, err := Pow2(selfScalar, exponent) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPow_(exponent *Scalar) { + + err := ts.Pow_(exponent) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustPow1_(exponent *Tensor) { + + err := ts.Pow1_(exponent) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustPowOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.PowOut(out, exponent, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPowOut1(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.PowOut1(out, exponent, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustPowOut2(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { + + retVal, err := PowOut2(out, selfScalar, exponent) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPrelu(weight *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Prelu(weight, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustProd(dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Prod(dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustProd1(dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Prod1(dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustProdOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.ProdOut(out, dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool) { + + err := ts.Put_(index, source, accumulate) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustQPerChannelScales(del bool) (retVal *Tensor) { + + retVal, err := ts.QPerChannelScales(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustQPerChannelZeroPoints(del bool) (retVal *Tensor) { + + retVal, err := ts.QPerChannelZeroPoints(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustQuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del) + 
if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64) (retVal *Tensor) { + + retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustQuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { + + retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { + + retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustQuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { + + retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustQuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { + + retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Rand(size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRandLike(del bool) (retVal *Tensor) { + + retVal, err := ts.RandLike(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandOut(out *Tensor, size []int64) (retVal *Tensor) { + + retVal, err := RandOut(out, size) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Randint(high, size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Randint1(low, high, size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func 
(ts *Tensor) MustRandintLike(high int64, del bool) (retVal *Tensor) { + + retVal, err := ts.RandintLike(high, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRandintLike1(low int64, high int64, del bool) (retVal *Tensor) { + + retVal, err := ts.RandintLike1(low, high, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandintOut(out *Tensor, high int64, size []int64) (retVal *Tensor) { + + retVal, err := RandintOut(out, high, size) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandintOut1(out *Tensor, low int64, high int64, size []int64) (retVal *Tensor) { + + retVal, err := RandintOut1(out, low, high, size) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Randn(size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRandnLike(del bool) (retVal *Tensor) { + + retVal, err := ts.RandnLike(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandnOut(out *Tensor, size []int64) (retVal *Tensor) { + + retVal, err := RandnOut(out, size) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRandom_() { + + err := ts.Random_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRandom1_(to int64) { + + err := ts.Random1_(to) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRandom2(from int64, to int64) { + + err := ts.Random2(from, to) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Randperm(n, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRandpermOut(out *Tensor, n int64) (retVal *Tensor) { + + retVal, err := RandpermOut(out, n) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRange(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Range(start, end, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRange1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Range1(start, end, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRangeOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor) { + + retVal, err := RangeOut(out, start, end) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReal(del bool) (retVal *Tensor) { + + retVal, err := ts.Real(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReciprocal(del bool) (retVal *Tensor) { + + retVal, err := ts.Reciprocal(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReciprocal_() { + + err := ts.Reciprocal_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustReciprocalOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ReciprocalOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad1d(padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad1d(padding, del) + if err != nil { + 
log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad1dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad1dOut(out, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad2d(padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad2d(padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad2dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReflectionPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReflectionPad2dOut(out, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRelu(del bool) (retVal *Tensor) { + + retVal, err := ts.Relu(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRelu_() { + + err := ts.Relu_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRemainder(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Remainder(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRemainder1(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Remainder1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRemainder_(other *Scalar) { + + err := ts.Remainder_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRemainder1_(other *Tensor) { + + err := ts.Remainder1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRemainderOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.RemainderOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRemainderOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.RemainderOut1(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRenorm(p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Renorm(p, dim, maxnorm, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRenorm_(p *Scalar, dim int64, maxnorm *Scalar) { + + err := ts.Renorm_(p, dim, maxnorm) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal 
*Tensor) { + + retVal, err := ts.RenormOut(out, p, dim, maxnorm, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRepeat(repeats []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Repeat(repeats, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRepeatInterleave(repeats *Tensor) (retVal *Tensor) { + + retVal, err := RepeatInterleave(repeats) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRepeatInterleave1(repeats *Tensor, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.RepeatInterleave1(repeats, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRepeatInterleave2(repeats int64, dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.RepeatInterleave2(repeats, dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad1d(padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad1d(padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad1dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad1dOut(out, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad2d(padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad2d(padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad2dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad2dOut(out, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad3d(padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad3d(padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReplicationPad3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad3dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { + log.Fatal(err) + } + + 
return retVal +} + +func (ts *Tensor) MustReplicationPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ReplicationPad3dOut(out, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRequiresGrad_(requiresGrad bool) { + + err := ts.RequiresGrad_(requiresGrad) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustReshape(shape []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Reshape(shape, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustReshapeAs(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ReshapeAs(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustResize_(size []int64) { + + err := ts.Resize_(size) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustResizeAs_(theTemplate *Tensor) { + + err := ts.ResizeAs_(theTemplate) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRfft(signalNdim int64, normalized bool, onesided bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Rfft(signalNdim, normalized, onesided, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { + + retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { + + retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRoll(shifts []int64, dims []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Roll(shifts, dims, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRot90(k int64, dims []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Rot90(k, dims, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRound(del bool) (retVal *Tensor) { + + retVal, err := ts.Round(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRound_() { + + err := ts.Round_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRoundOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.RoundOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRrelu(training bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Rrelu(training, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRrelu_(training bool) { + + err := ts.Rrelu_(training) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRreluWithNoise(noise *Tensor, training bool, del bool) (retVal *Tensor) { + + retVal, err := ts.RreluWithNoise(noise, training, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRreluWithNoise_(noise *Tensor, training bool) { + + err := ts.RreluWithNoise_(noise, training) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool) (retVal *Tensor) { + + retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, 
selfIsResult, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool) (retVal *Tensor) { + + retVal, err := ts.RreluWithNoiseOut(out, noise, training, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRsqrt(del bool) (retVal *Tensor) { + + retVal, err := ts.Rsqrt(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRsqrt_() { + + err := ts.Rsqrt_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustRsqrtOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.RsqrtOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRsub(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Rsub(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustRsub1(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Rsub1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := ScalarTensor(s, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustScatter(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Scatter(dim, index, src, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustScatter1(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Scatter1(dim, index, value, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustScatter_(dim int64, index *Tensor, src *Tensor) { + + err := ts.Scatter_(dim, index, src) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustScatter1_(dim int64, index *Tensor, value *Scalar) { + + err := ts.Scatter1_(dim, index, value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustScatterAdd(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.ScatterAdd(dim, index, src, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustScatterAdd_(dim int64, index *Tensor, src *Tensor) { + + err := ts.ScatterAdd_(dim, index, src) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSelect(dim int64, index int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Select(dim, index, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSelu(del bool) (retVal *Tensor) { + + retVal, err := ts.Selu(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSelu_() { + + err := ts.Selu_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSet_() { + + err := ts.Set_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSet1_(source *Tensor) { + + err := ts.Set1_(source) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSetRequiresGrad(r bool, del bool) (retVal *Tensor) { + + retVal, err := ts.SetRequiresGrad(r, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSigmoid(del bool) (retVal *Tensor) { + + retVal, err := ts.Sigmoid(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts 
*Tensor) MustSigmoid_() { + + err := ts.Sigmoid_() + if err != nil { + log.Fatal(err) + } + + return +} + +func MustSigmoidBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor) { + + retVal, err := SigmoidBackward(gradOutput, output) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustSigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor) { + + retVal, err := SigmoidBackwardOut(gradInput, gradOutput, output) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSigmoidOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SigmoidOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSign(del bool) (retVal *Tensor) { + + retVal, err := ts.Sign(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSign_() { + + err := ts.Sign_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSignOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SignOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSin(del bool) (retVal *Tensor) { + + retVal, err := ts.Sin(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSin_() { + + err := ts.Sin_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSinOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SinOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSinh(del bool) (retVal *Tensor) { + + retVal, err := ts.Sinh(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSinh_() { + + err := ts.Sinh_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSinhOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SinhOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlice(dim int64, start int64, end int64, step int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Slice(dim, start, end, step, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) 
MustSlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSmm(mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Smm(mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSmoothL1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SmoothL1Loss(target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSmoothL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SmoothL1LossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SmoothL1LossOut(out, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftMarginLoss(target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftMarginLossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) 
MustSoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftMarginLossOut(out, target, reduction, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Softmax(dim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftplus(del bool) (retVal *Tensor) { + + retVal, err := ts.Softplus(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, output, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftplusBackwardOut(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftplusBackwardOut(gradInput, gradOutput, beta, threshold, output, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftplusOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftplusOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftshrink(del bool) (retVal *Tensor) { + + retVal, err := ts.Softshrink(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftshrinkBackwardOut(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftshrinkBackwardOut(gradInput, gradOutput, lambd, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSoftshrinkOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SoftshrinkOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := SparseCooTensor(size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustSparseCooTensor1(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := SparseCooTensor1(indices, values, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustSparseCooTensor2(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := SparseCooTensor2(indices, values, size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSparseMask(mask *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SparseMask(mask, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64) { + + err := ts.SparseResize_(size, sparseDim, denseDim) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64) { + + err := ts.SparseResizeAndClear_(size, 
sparseDim, denseDim) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSqrt(del bool) (retVal *Tensor) { + + retVal, err := ts.Sqrt(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSqrt_() { + + err := ts.Sqrt_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSqrtOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SqrtOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSquare(del bool) (retVal *Tensor) { + + retVal, err := ts.Square(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSquare_() { + + err := ts.Square_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSqueeze(del bool) (retVal *Tensor) { + + retVal, err := ts.Squeeze(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSqueeze1(dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Squeeze1(dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSqueeze_() { + + err := ts.Squeeze_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSqueeze1_(dim int64) { + + err := ts.Squeeze1_(dim) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSspaddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Sspaddmm(mat1, mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SspaddmmOut(out, mat1, mat2, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustStack(tensors []Tensor, dim int64) (retVal *Tensor) { + + retVal, err := Stack(tensors, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustStackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { + + retVal, err := StackOut(out, tensors, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustStd(unbiased bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Std(unbiased, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustStd1(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Std1(dim, unbiased, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustStdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustStft(nFft int64, hopLength int64, winLength int64, window *Tensor, normalized bool, onesided bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSub(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Sub(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSub1(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Sub1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSub_(other *Tensor) { + + err := ts.Sub_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts 
*Tensor) MustSub1_(other *Scalar) { + + err := ts.Sub1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustSubOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.SubOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSum(dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Sum(dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSum1(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Sum1(dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.SumOut(out, dim, keepdim, dtype, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustSumToSize(size []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.SumToSize(size, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustT(del bool) (retVal *Tensor) { + + retVal, err := ts.T(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustT_() { + + err := ts.T_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustTake(index *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Take(index, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTakeOut(out *Tensor, index *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.TakeOut(out, index, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTan(del bool) (retVal *Tensor) { + + retVal, err := ts.Tan(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTan_() { + + err := ts.Tan_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustTanOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.TanOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTanh(del bool) (retVal *Tensor) { + + retVal, err := ts.Tanh(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTanh_() { + + err := ts.Tanh_() + if err != nil { + log.Fatal(err) + } + + return +} + +func MustTanhBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor) { + + retVal, err := TanhBackward(gradOutput, output) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustTanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor) { + + retVal, err := TanhBackwardOut(gradInput, gradOutput, output) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTanhOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.TanhOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustThreshold(threshold *Scalar, value *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.Threshold(threshold, value, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustThreshold_(threshold *Scalar, value *Scalar) 
{ + + err := ts.Threshold_(threshold, value) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.ThresholdBackward(gradOutput, threshold, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.ThresholdOut(out, threshold, value, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTo(device gotch.Device, del bool) (retVal *Tensor) { + + retVal, err := ts.To(device, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTo1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { + + retVal, err := ts.To1(optionsKind, optionsDevice, nonBlocking, copy, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTo2(dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { + + retVal, err := ts.To2(dtype, nonBlocking, copy, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTo3(other *Tensor, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { + + retVal, err := ts.To3(other, nonBlocking, copy, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTo4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { + + retVal, err := ts.To4(device, dtype, nonBlocking, copy, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustToDense(del bool) (retVal *Tensor) { + + retVal, err := ts.ToDense(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustToDenseBackward(grad *Tensor, input *Tensor) (retVal *Tensor) { + + retVal, err := ToDenseBackward(grad, input) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustToMkldnn(del bool) (retVal *Tensor) { + + retVal, err := ts.ToMkldnn(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustToMkldnnBackward(grad *Tensor, input *Tensor) (retVal *Tensor) { + + retVal, err := ToMkldnnBackward(grad, input) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustToSparse(del bool) (retVal *Tensor) { + + retVal, err := ts.ToSparse(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustToSparse1(sparseDim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.ToSparse1(sparseDim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTotype(scalarType gotch.DType, del bool) (retVal *Tensor) { + + retVal, err := ts.Totype(scalarType, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTrace(del bool) (retVal *Tensor) { + + retVal, err := ts.Trace(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Transpose(dim0, dim1, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTranspose_(dim0 int64, dim1 int64) { + + err := ts.Transpose_(dim0, dim1) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustTrapz(y *Tensor, x *Tensor, dim int64) (retVal *Tensor) { + + retVal, err := Trapz(y, x, dim) 
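// (Editor's note: illustrative sketch, not part of the patch.) The MustTo*
// wrappers above cover the device/dtype conversion overloads; the common
// single-argument device move looks like the following, assuming a
// gotch.CudaIfAvailable() helper is used for device selection:
//
//	device := gotch.CudaIfAvailable()
//	xsGPU := xs.MustTo(device, true) // move xs to `device`, dropping the original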
+ if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustTrapz1(y *Tensor, dx float64, dim int64) (retVal *Tensor) { + + retVal, err := Trapz1(y, dx, dim) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTril(diagonal int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Tril(diagonal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTril_(diagonal int64) { + + err := ts.Tril_(diagonal) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTrilOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { + + retVal, err := ts.TrilOut(out, diagonal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustTripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64) (retVal *Tensor) { + + retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTriu(diagonal int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Triu(diagonal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTriu_(diagonal int64) { + + err := ts.Triu_(diagonal) + if err != nil { + log.Fatal(err) + } + + return +} + +func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTriuOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { + + retVal, err := ts.TriuOut(out, diagonal, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTrueDivide(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.TrueDivide(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTrueDivide1(other *Scalar, del bool) (retVal *Tensor) { + + retVal, err := ts.TrueDivide1(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTrueDivide_(other *Tensor) { + + err := ts.TrueDivide_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustTrueDivide1_(other *Scalar) { + + err := ts.TrueDivide1_(other) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustTrueDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.TrueDivideOut(out, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTrunc(del bool) (retVal *Tensor) { + + retVal, err := ts.Trunc(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTrunc_() { + + err := ts.Trunc_() + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustTruncOut(out *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.TruncOut(out, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustTypeAs(other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.TypeAs(other, del) + if err != nil { 
+ log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Unfold(dimension, size, step, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUniform_(from float64, to float64) { + + err := ts.Uniform_(from, to) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustUnsqueeze(dim int64, del bool) (retVal *Tensor) { + + retVal, err := ts.Unsqueeze(dim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUnsqueeze_(dim int64) { + + err := ts.Unsqueeze_(dim) + if err != nil { + log.Fatal(err) + } + + return +} + +func (ts *Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleBicubic2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleBicubic2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleBilinear2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleBilinear2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, del bool) (retVal *Tensor) { + + retVal, err := 
ts.UpsampleLinear1d(outputSize, alignCorners, scales, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor) { + + retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleLinear1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor) { + + retVal, err := UpsampleLinear1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleNearest1d(outputSize []int64, scales float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleNearest1d(outputSize, scales, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor) { + + retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleNearest1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor) { + + retVal, err := UpsampleNearest1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scales) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleNearest1dOut(out *Tensor, outputSize []int64, scales float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleNearest2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleNearest2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, 
scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleNearest3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleNearest3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustUpsampleTrilinear3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { + + retVal, err := UpsampleTrilinear3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustUpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { + + retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustValues(del bool) (retVal *Tensor) { + + retVal, err := ts.Values(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustVar(unbiased bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Var(unbiased, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustVar1(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.Var1(dim, unbiased, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustVarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { + + retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustView(size []int64, del bool) (retVal *Tensor) { + + retVal, err := ts.View(size, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustViewAs(other *Tensor, del 
bool) (retVal *Tensor) { + + retVal, err := ts.ViewAs(other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustWhere1(condition *Tensor, other *Tensor, del bool) (retVal *Tensor) { + + retVal, err := ts.Where1(condition, other, del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustZero_() { + + err := ts.Zero_() + if err != nil { + log.Fatal(err) + } + + return +} + +func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { + + retVal, err := Zeros(size, optionsKind, optionsDevice) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func (ts *Tensor) MustZerosLike(del bool) (retVal *Tensor) { + + retVal, err := ts.ZerosLike(del) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +func MustZerosOut(out *Tensor, size []int64) (retVal *Tensor) { + + retVal, err := ZerosOut(out, size) + if err != nil { + log.Fatal(err) + } + + return retVal +} + +// End of implementing Tensor ================================= diff --git a/tensor/optimizer.go b/tensor/optimizer.go index f011257..6be1460 100644 --- a/tensor/optimizer.go +++ b/tensor/optimizer.go @@ -11,20 +11,18 @@ type COptimizer struct { } // Adam returns Adam optimizer -func Adam(lr, beta1, beta2, weightDecay float64) (retVal COptimizer, err error) { +func Adam(lr, beta1, beta2, weightDecay float64) (*COptimizer, error) { coptimizer := lib.AtoAdam(lr, beta1, beta2, weightDecay) - err = TorchErr() - if err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } - retVal = COptimizer{coptimizer} - return retVal, nil + return &COptimizer{coptimizer}, nil } // RmsProp returns RMSProp optimizer -func RmsProp(lr, alpha, eps, wd, momentum float64, centered bool) (retVal COptimizer, err error) { +func RmsProp(lr, alpha, eps, wd, momentum float64, centered bool) (*COptimizer, error) { var centeredCInt int switch centered { case true: @@ -34,19 +32,15 @@ func RmsProp(lr, alpha, eps, wd, momentum float64, centered bool) (retVal COptim } coptimizer := lib.AtoRmsProp(lr, alpha, eps, wd, momentum, centeredCInt) - err = TorchErr() - if err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } - retVal = COptimizer{coptimizer} - - return retVal, nil - + return &COptimizer{coptimizer}, nil } // Sgd returns SGD optimizer -func Sgd(lr, momentum, dampening, wd float64, nesterov bool) (retVal COptimizer, err error) { +func Sgd(lr, momentum, dampening, wd float64, nesterov bool) (*COptimizer, error) { var nesterovCInt int switch nesterov { case true: @@ -56,18 +50,15 @@ func Sgd(lr, momentum, dampening, wd float64, nesterov bool) (retVal COptimizer, } coptimizer := lib.AtoSgd(lr, momentum, dampening, wd, nesterovCInt) - err = TorchErr() - if err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } - retVal = COptimizer{coptimizer} - - return retVal, nil + return &COptimizer{coptimizer}, nil } // AddParameters adds parameters as a slice of tensors to optimizer -func (co COptimizer) AddParameters(tensors []Tensor) (err error) { +func (co *COptimizer) AddParameters(tensors []Tensor) error { var ctensors []lib.Ctensor for _, t := range tensors { @@ -82,35 +73,35 @@ func (co COptimizer) AddParameters(tensors []Tensor) (err error) { } // SetLeanringRate sets learning rate for the optimizer -func (co COptimizer) SetLearningRate(lr float64) (err error) { +func (co *COptimizer) SetLearningRate(lr float64) error { 
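// (Editor's note: illustrative sketch, not part of the patch.) With the
// constructors above now returning *COptimizer, a typical caller checks the
// error once and then drives the optimizer through pointer methods. Here
// `params` is assumed to be a []Tensor of trainable variables and 1e-3 is just
// an example learning rate:
//
//	opt, err := Adam(1e-3, 0.9, 0.999, 0.0)
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := opt.AddParameters(params); err != nil {
//		log.Fatal(err)
//	}
//	if err := opt.ZeroGrad(); err != nil {
//		log.Fatal(err)
//	}
//	// ... run the forward pass and backward pass on the loss here ...
//	if err := opt.Step(); err != nil {
//		log.Fatal(err)
//	}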
lib.AtoSetLearningRate(co.coptimizer, lr) return TorchErr() } // SetMomentum sets a momentum for the optimizer -func (co COptimizer) SetMomentum(m float64) (err error) { +func (co *COptimizer) SetMomentum(m float64) error { lib.AtoSetMomentum(co.coptimizer, m) return TorchErr() } // ZeroGrad sets gradients to zero -func (co COptimizer) ZeroGrad() (err error) { +func (co *COptimizer) ZeroGrad() error { lib.AtoZeroGrad(co.coptimizer) return TorchErr() } // Steps proceeds optimizer -func (co COptimizer) Step() (err error) { +func (co *COptimizer) Step() error { lib.AtoStep(co.coptimizer) return TorchErr() } // Drop removes optimizer and frees up memory. -func (co COptimizer) Drop() { +func (co *COptimizer) Drop() { lib.AtoFree(co.coptimizer) if err := TorchErr(); err != nil { diff --git a/tensor/other.go b/tensor/other.go index 1431a06..2c1db26 100644 --- a/tensor/other.go +++ b/tensor/other.go @@ -7,7 +7,7 @@ import ( ) // CrossEntropyForLogits computes the cross-entropy loss based on some logits and targets. -func (ts Tensor) CrossEntropyForLogits(targets Tensor) (retVal Tensor) { +func (ts *Tensor) CrossEntropyForLogits(targets *Tensor) (retVal *Tensor) { weight := NewTensor() reduction := int64(1) // Mean of loss ignoreIndex := int64(-100) @@ -18,13 +18,13 @@ func (ts Tensor) CrossEntropyForLogits(targets Tensor) (retVal Tensor) { // AccuracyForLogits returns the average accuracy for some given logits assuming that // targets represent ground-truth. -func (ts Tensor) AccuracyForLogits(targets Tensor) (retVal Tensor) { +func (ts *Tensor) AccuracyForLogits(targets *Tensor) (retVal *Tensor) { argmax := ts.MustArgmax(-1, false, true) eq1 := argmax.MustEq1(targets, true) return eq1.MustTotype(gotch.Float, true).MustMean(gotch.Float, true) } -func (ts Tensor) MaxPool2DDefault(ksize int64, del bool) (retVal Tensor) { +func (ts *Tensor) MaxPool2DDefault(ksize int64, del bool) (retVal *Tensor) { return ts.MustMaxPool2d([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, []int64{1, 1}, false, del) } diff --git a/tensor/patch.go b/tensor/patch.go index e2f8ead..fc167d2 100644 --- a/tensor/patch.go +++ b/tensor/patch.go @@ -13,7 +13,7 @@ import ( // NOTE. This is a temporarily patched to make it run. // TODO. make change at generator for []Tensor input -func (ts Tensor) Lstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c Tensor, err error) { +func (ts *Tensor) Lstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c *Tensor, err error) { // NOTE: `atg_lstm` will create 3 consecutive Ctensors in memory of C land. The first // Ctensor will have address given by `ctensorPtr1` here. 
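// (Editor's note: illustrative sketch, not part of the patch.) Based on the
// NOTE above, the elided body is assumed to derive the second and third result
// pointers by stepping over consecutive Ctensor slots, roughly:
//
//	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
//	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr1)))
//	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr1)))
//
// The `atg_lstm` binding then writes all three Ctensors starting at
// ctensorPtr1, and the wrapper returns &Tensor{ctensor: *ctensorPtrN} for each
// of them, as the hunk below shows.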
@@ -55,11 +55,11 @@ func (ts Tensor) Lstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, numL return output, h, c, err } - return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, Tensor{ctensor: *ctensorPtr3}, nil + return &Tensor{ctensor: *ctensorPtr1}, &Tensor{ctensor: *ctensorPtr2}, &Tensor{ctensor: *ctensorPtr3}, nil } -func (ts Tensor) MustLstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c Tensor) { +func (ts *Tensor) MustLstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c *Tensor) { output, h, c, err := ts.Lstm(hxData, paramsData, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) if err != nil { @@ -69,7 +69,7 @@ func (ts Tensor) MustLstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, return output, h, c } -func (ts Tensor) Gru(hx Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h Tensor, err error) { +func (ts *Tensor) Gru(hx *Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h *Tensor, err error) { // NOTE: `atg_gru` will create 2 consecutive Ctensors in memory of C land. // The first Ctensor will have address given by `ctensorPtr1` here. @@ -105,11 +105,11 @@ func (ts Tensor) Gru(hx Tensor, paramsData []Tensor, hasBiases bool, numLayers i return output, h, err } - return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, nil + return &Tensor{ctensor: *ctensorPtr1}, &Tensor{ctensor: *ctensorPtr2}, nil } -func (ts Tensor) MustGru(hx Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h Tensor) { +func (ts *Tensor) MustGru(hx *Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h *Tensor) { output, h, err := ts.Gru(hx, paramsData, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) if err != nil { log.Fatal(err) @@ -118,7 +118,7 @@ func (ts Tensor) MustGru(hx Tensor, paramsData []Tensor, hasBiases bool, numLaye return output, h } -func (ts Tensor) TopK(k int64, dim int64, largest bool, sorted bool) (ts1 Tensor, ts2 Tensor, err error) { +func (ts *Tensor) TopK(k int64, dim int64, largest bool, sorted bool) (ts1, ts2 *Tensor, err error) { // NOTE: `lib.AtgTopk` will return 2 tensors in C memory. First tensor pointer // is given by ctensorPtr1 @@ -139,10 +139,10 @@ func (ts Tensor) TopK(k int64, dim int64, largest bool, sorted bool) (ts1 Tensor return ts1, ts2, err } - return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, nil + return &Tensor{ctensor: *ctensorPtr1}, &Tensor{ctensor: *ctensorPtr2}, nil } -func (ts Tensor) MustTopK(k int64, dim int64, largest bool, sorted bool) (ts1 Tensor, ts2 Tensor) { +func (ts *Tensor) MustTopK(k int64, dim int64, largest bool, sorted bool) (ts1, ts2 *Tensor) { ts1, ts2, err := ts.TopK(k, dim, largest, sorted) if err != nil { @@ -154,7 +154,7 @@ func (ts Tensor) MustTopK(k int64, dim int64, largest bool, sorted bool) (ts1 Te // NOTE. 
`NLLLoss` is a version of `NllLoss` in tensor-generated // with default weight, reduction and ignoreIndex -func (ts Tensor) NLLLoss(target Tensor, del bool) (retVal Tensor, err error) { +func (ts *Tensor) NLLLoss(target Tensor, del bool) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) if del { defer ts.MustDrop() @@ -169,12 +169,12 @@ func (ts Tensor) NLLLoss(target Tensor, del bool) (retVal Tensor, err error) { return retVal, err } - retVal = Tensor{ctensor: *ptr} + retVal = &Tensor{ctensor: *ptr} return retVal, nil } -func (ts Tensor) MustNLLLoss(target Tensor, del bool) (retVal Tensor) { +func (ts *Tensor) MustNLLLoss(target Tensor, del bool) (retVal *Tensor) { retVal, err := ts.NLLLoss(target, del) if err != nil { log.Fatal(err) @@ -285,7 +285,7 @@ func MustBroadcastTensors(tensors []Tensor, del bool) (retVal []Tensor) { } // tensor *atg_chunk(tensor self, int64_t chunks, int64_t dim); -func (ts Tensor) Chunk(chunks int64, dim int64) (retVal []Tensor, err error) { +func (ts *Tensor) Chunk(chunks int64, dim int64) (retVal []Tensor, err error) { ctensorsPtr := lib.AtgChunk(ts.ctensor, chunks, dim) if err = TorchErr(); err != nil { return retVal, err @@ -307,7 +307,7 @@ func (ts Tensor) Chunk(chunks int64, dim int64) (retVal []Tensor, err error) { return retVal, nil } -func (ts Tensor) MustChunk(chunks int64, dim int64, del bool) (retVal []Tensor) { +func (ts *Tensor) MustChunk(chunks int64, dim int64, del bool) (retVal []Tensor) { if del { defer ts.MustDrop() } @@ -321,7 +321,7 @@ func (ts Tensor) MustChunk(chunks int64, dim int64, del bool) (retVal []Tensor) } // tensor *atg_meshgrid(tensor *tensors_data, int tensors_len); -func (ts Tensor) Meshgrid(tensors []Tensor) (retVal []Tensor, err error) { +func (ts *Tensor) Meshgrid(tensors []Tensor) (retVal []Tensor, err error) { var ctensors []lib.Ctensor for _, t := range tensors { @@ -348,7 +348,7 @@ func (ts Tensor) Meshgrid(tensors []Tensor) (retVal []Tensor, err error) { return retVal, nil } -func (ts Tensor) MustMeshgrid(tensors []Tensor, del bool) (retVal []Tensor) { +func (ts *Tensor) MustMeshgrid(tensors []Tensor, del bool) (retVal []Tensor) { if del { defer ts.MustDrop() } @@ -362,7 +362,7 @@ func (ts Tensor) MustMeshgrid(tensors []Tensor, del bool) (retVal []Tensor) { } // tensor *atg_nonzero_numpy(tensor self); -func (ts Tensor) NonzeroNumpy() (retVal []Tensor, err error) { +func (ts *Tensor) NonzeroNumpy() (retVal []Tensor, err error) { ctensorsPtr := lib.AtgNonzeroNumpy(ts.ctensor) if err = TorchErr(); err != nil { @@ -384,7 +384,7 @@ func (ts Tensor) NonzeroNumpy() (retVal []Tensor, err error) { return retVal, nil } -func (ts Tensor) MustNonzeroNumpy(del bool) (retVal []Tensor) { +func (ts *Tensor) MustNonzeroNumpy(del bool) (retVal []Tensor) { if del { defer ts.MustDrop() } @@ -403,7 +403,7 @@ func (ts Tensor) MustNonzeroNumpy(del bool) (retVal []Tensor) { // - splitSize – size of a single chunk // - dim – dimension along which to split the tensor. // Ref. 
https://pytorch.org/docs/stable/generated/torch.split.html -func (ts Tensor) Split(splitSize, dim int64) (retVal []Tensor, err error) { +func (ts *Tensor) Split(splitSize, dim int64) (retVal []Tensor, err error) { ctensorsPtr := lib.AtgSplit(ts.ctensor, splitSize, dim) if err = TorchErr(); err != nil { @@ -430,7 +430,7 @@ func (ts Tensor) Split(splitSize, dim int64) (retVal []Tensor, err error) { return retVal, nil } -func (ts Tensor) MustSplit(splitSize, dim int64, del bool) (retVal []Tensor) { +func (ts *Tensor) MustSplit(splitSize, dim int64, del bool) (retVal []Tensor) { if del { defer ts.MustDrop() } @@ -449,7 +449,7 @@ func (ts Tensor) MustSplit(splitSize, dim int64, del bool) (retVal []Tensor) { // - splitSizes – slice of sizes for each chunk // - dim – dimension along which to split the tensor. // Ref. https://pytorch.org/docs/stable/generated/torch.split.html -func (ts Tensor) SplitWithSizes(splitSizes []int64, dim int64) (retVal []Tensor, err error) { +func (ts *Tensor) SplitWithSizes(splitSizes []int64, dim int64) (retVal []Tensor, err error) { ctensorsPtr := lib.AtgSplitWithSizes(ts.ctensor, splitSizes, len(splitSizes), dim) if err = TorchErr(); err != nil { @@ -476,7 +476,7 @@ func (ts Tensor) SplitWithSizes(splitSizes []int64, dim int64) (retVal []Tensor, return retVal, nil } -func (ts Tensor) MustSplitWithSizes(splitSizes []int64, dim int64, del bool) (retVal []Tensor) { +func (ts *Tensor) MustSplitWithSizes(splitSizes []int64, dim int64, del bool) (retVal []Tensor) { if del { defer ts.MustDrop() } @@ -490,7 +490,7 @@ func (ts Tensor) MustSplitWithSizes(splitSizes []int64, dim int64, del bool) (re } // tensor *atg_unbind(tensor self, int64_t dim); -func (ts Tensor) Unbind(dim int64) (retVal []Tensor, err error) { +func (ts *Tensor) Unbind(dim int64) (retVal []Tensor, err error) { ctensorsPtr := lib.AtgUnbind(ts.ctensor, dim) if err = TorchErr(); err != nil { @@ -512,7 +512,7 @@ func (ts Tensor) Unbind(dim int64) (retVal []Tensor, err error) { return retVal, nil } -func (ts Tensor) MustUnbind(dim int64, del bool) (retVal []Tensor) { +func (ts *Tensor) MustUnbind(dim int64, del bool) (retVal []Tensor) { if del { defer ts.MustDrop() } diff --git a/tensor/scalar.go b/tensor/scalar.go index 3188e37..dae8bdd 100644 --- a/tensor/scalar.go +++ b/tensor/scalar.go @@ -12,19 +12,19 @@ type Scalar struct { } // IntScalar creates a integer scalar -func IntScalar(v int64) Scalar { +func IntScalar(v int64) *Scalar { cscalar := lib.AtsInt(v) - return Scalar{cscalar} + return &Scalar{cscalar} } // FloatScalar creates a float scalar -func FloatScalar(v float64) Scalar { +func FloatScalar(v float64) *Scalar { cscalar := lib.AtsFloat(v) - return Scalar{cscalar} + return &Scalar{cscalar} } // ToInt returns a integer value -func (sc Scalar) ToInt() (retVal int64, err error) { +func (sc *Scalar) ToInt() (retVal int64, err error) { retVal = lib.AtsToInt(sc.cscalar) err = TorchErr() if err != nil { @@ -35,7 +35,7 @@ func (sc Scalar) ToInt() (retVal int64, err error) { } // ToFloat returns a float value -func (sc Scalar) ToFloat() (retVal float64, err error) { +func (sc *Scalar) ToFloat() (retVal float64, err error) { retVal = lib.AtsToFloat(sc.cscalar) err = TorchErr() if err != nil { @@ -46,7 +46,7 @@ func (sc Scalar) ToFloat() (retVal float64, err error) { } // ToString returns a string representation of scalar value -func (sc Scalar) ToString() (retVal string, err error) { +func (sc *Scalar) ToString() (retVal string, err error) { retVal = lib.AtsToString(sc.cscalar) err = TorchErr() if err != 
nil { @@ -60,12 +60,12 @@ func (sc Scalar) ToString() (retVal string, err error) { // // TODO: Really? after running s.Drop() and s.ToInt() // it returns Zero. -func (sc Scalar) Drop() (err error) { +func (sc *Scalar) Drop() (err error) { lib.AtsFree(sc.cscalar) return TorchErr() } -func (sc Scalar) MustDrop() { +func (sc *Scalar) MustDrop() { lib.AtsFree(sc.cscalar) if err := TorchErr(); err != nil { log.Fatal(err) diff --git a/tensor/tensor-generated.go b/tensor/tensor-generated.go index f8598c0..8a4c832 100644 --- a/tensor/tensor-generated.go +++ b/tensor/tensor-generated.go @@ -5,13031 +5,14819 @@ package tensor // #include "stdlib.h" import "C" -import( - "unsafe" +import ( + "unsafe" - "github.com/sugarme/gotch" - lib "github.com/sugarme/gotch/libtch" + "github.com/sugarme/gotch" + lib "github.com/sugarme/gotch/libtch" ) +func (ts *Tensor) __And_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) -func(ts Tensor) __And_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__And_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __And1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__And1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Iand_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Iand1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Iand1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Ilshift_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ilshift_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Ilshift1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ilshift1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Ior_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Ior1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ior1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Irshift_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Irshift1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Irshift1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Ixor_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Ixor1(other Tensor)(err error) { - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Ixor1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Lshift_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Lshift1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Lshift1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Or_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Or_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Or1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Or1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Rshift_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Rshift1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Rshift1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Xor_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) __Xor1(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg__Xor1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _AdaptiveAvgPool2dBackward(gradOutput Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Addr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Addr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Addr_(vec1 Tensor, vec2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Addr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _AddrOut(out Tensor, vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _AmpUpdateScale(growthTracker Tensor, currentScale Tensor, foundInf Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_AmpUpdateScale(ptr, growthTracker.ctensor, currentScale.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _BaddbmmMkl_(batch1 Tensor, batch2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_BaddbmmMkl_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _CastByte(nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CastChar(nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CastDouble(nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CastFloat(nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CastHalf(nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CastInt(nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CastLong(nonBlocking bool, 
del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CastShort(nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _Cat(tensors []Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.Atg_Cat(ptr, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.Atg_CatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CdistBackward(grad Tensor, x1 Tensor, x2 Tensor, p float64, cdist Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CholeskyHelper(upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.Atg_CholeskyHelper(ptr, ts.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CholeskySolveHelper(a Tensor, upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Coalesced_(coalesced bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ccoalesced := int32(0) - if coalesced { ccoalesced = int32(1) } -lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func _Convolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctransposed := int32(0) - if transposed { ctransposed = int32(1) } -cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) 
- if deterministic { cdeterministic = int32(1) } -ccudnnEnabled := int32(0) - if cudnnEnabled { ccudnnEnabled = int32(1) } -lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _ConvolutionNogroup(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctransposed := int32(0) - if transposed { ctransposed = int32(1) } -lib.Atg_ConvolutionNogroup(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CopyFrom(dst Tensor, nonBlocking bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.Atg_CopyFrom(ptr, ts.ctensor, dst.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CtcLossBackward(grad Tensor, logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood Tensor, logAlpha Tensor, blank int64, zeroInfinity bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - czeroInfinity := int32(0) - if zeroInfinity { czeroInfinity = int32(1) } -lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cweightArr []lib.Ctensor - for _, t := range weightArr {cweightArr = append(cweightArr, t.ctensor)} -cbatchFirst := int32(0) - if batchFirst { cbatchFirst = int32(1) } -cbidirectional := int32(0) - if bidirectional { cbidirectional = int32(1) } -lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, numLayers, cbatchFirst, cbidirectional) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Cumprod(dim int64, del 
bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Cumprod(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CumprodOut(out Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_CumprodOut(ptr, out.ctensor, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Cumsum(dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Cumsum(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _CumsumOut(out Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_CumsumOut(ptr, out.ctensor, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _DimArange(like Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_DimArange(ptr, like.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _DirichletGrad(x Tensor, alpha Tensor, total Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBagBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { cscaleGradByFreq = int32(1) } -csparse := int32(0) - if sparse { csparse = int32(1) } -lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBagDenseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { cscaleGradByFreq = int32(1) } -lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBagPerSampleWeightsBackward(grad Tensor, weight Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, 
mode int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmbeddingBagSparseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { cscaleGradByFreq = int32(1) } -lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_EmptyAffineQuantized(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _EmptyPerChannelAffineQuantized(size []int64, scales Tensor, zeroPoints Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, len(size), scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ccomplexInput := int32(0) - if complexInput { ccomplexInput = int32(1) } -ccomplexOutput := int32(0) - if complexOutput { ccomplexOutput = int32(1) } -cinverse := int32(0) - if inverse { cinverse = int32(1) } -cnormalized := int32(0) - if normalized { cnormalized = int32(1) } -conesided := int32(0) - if onesided { conesided = int32(1) } -lib.Atg_FftWithSize(ptr, ts.ctensor, signalNdim, ccomplexInput, ccomplexOutput, cinverse, checkedSignalSizes, len(checkedSignalSizes), cnormalized, conesided, outputSizes, len(outputSizes)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _GatherSparseBackward(dim int64, index Tensor, grad Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _IndexCopy_(dim int64, index Tensor, source Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_IndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) - 
if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _IndexPutImpl_(indices []Tensor, values Tensor, accumulate bool, unsafety bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cindices []lib.Ctensor - for _, t := range indices {cindices = append(cindices, t.ctensor)} -caccumulate := int32(0) - if accumulate { caccumulate = int32(1) } -cunsafety := int32(0) - if unsafety { cunsafety = int32(1) } -lib.Atg_IndexPutImpl_(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate, cunsafety) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _Indices(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Indices(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _InverseHelper(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_InverseHelper(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { chalfToFloat = int32(1) } -lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _LogSoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _LuSolveHelper(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_LuSolveHelper(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _MakePerChannelQuantizedTensor(scale Tensor, zeroPoint Tensor, axis int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _MaskedScale(mask Tensor, scale float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _MkldnnReshape(shape []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, len(shape)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func _MultinomialAliasDraw(j Tensor, q Tensor, numSamples int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_MultinomialAliasDraw(ptr, j.ctensor, q.ctensor, numSamples) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _NnpackSpatialConvolution(input Tensor, weight Tensor, bias Tensor, padding []int64, stride []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _NnpackSpatialConvolutionBackwardInput(input Tensor, gradOutput Tensor, weight Tensor, padding []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_NnpackSpatialConvolutionBackwardInput(ptr, input.ctensor, gradOutput.ctensor, weight.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _NnpackSpatialConvolutionBackwardWeight(input Tensor, weightsize []int64, gradOutput Tensor, padding []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_NnpackSpatialConvolutionBackwardWeight(ptr, input.ctensor, weightsize, len(weightsize), gradOutput.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _PackPaddedSequenceBackward(grad Tensor, inputSize []int64, batchSizes Tensor, batchFirst bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbatchFirst := int32(0) - if batchFirst { cbatchFirst = int32(1) } -lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, len(inputSize), batchSizes.ctensor, cbatchFirst) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _PdistBackward(grad Tensor, p float64, pdist Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_PdistBackward(ptr, 
grad.ctensor, ts.ctensor, p, pdist.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _ReshapeFromTensor(shape Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SWhere(condition Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SWhere(ptr, condition.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SampleDirichlet(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SampleDirichlet(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _ShapeAsTensor(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_ShapeAsTensor(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SobolEngineFf_(n int64, sobolstate Tensor, dimension int64, numGenerated int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _SobolEngineInitializeState_(dimension int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _SobolEngineScramble_(ltm Tensor, dimension int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) _Softmax(dim int64, halfToFloat bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - chalfToFloat := int32(0) - if halfToFloat { chalfToFloat = int32(1) } -lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SparseAddmm(sparse Tensor, dense Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseAddmm(ptr, ts.ctensor, sparse.ctensor, dense.ctensor) - if err = TorchErr(); err != nil { 
- return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseCooTensorUnsafe(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, len(size), indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _SparseMm(sparse Tensor, dense Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SparseSum(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSum(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SparseSum1(dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSum1(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SparseSum2(dim []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSum2(ptr, ts.ctensor, dim, len(dim)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SparseSum3(dim []int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSum3(ptr, ts.ctensor, dim, len(dim), dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _SparseSumBackward(grad Tensor, dim []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_SparseSumBackward(ptr, grad.ctensor, ts.ctensor, dim, len(dim)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = 
Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _StandardGamma(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_StandardGamma(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _StandardGammaGrad(output Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Std(unbiased bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -lib.Atg_Std(ptr, ts.ctensor, cunbiased) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _Trilinear(i1 Tensor, i2 Tensor, i3 Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, len(expand1), expand2, len(expand2), expand3, len(expand3), sumdim, len(sumdim), unrollDim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _UnsafeView(size []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_UnsafeView(ptr, ts.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Values(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_Values(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) _Var(unbiased bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -lib.Atg_Var(ptr, ts.ctensor, cunbiased) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func _WeightNorm(v Tensor, g Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Abs(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbs(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Abs_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbs_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AbsOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer 
ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Acos(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcos(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Acos_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcos_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AcosOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveAvgPool2dOut(out Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveAvgPool3dBackward(gradOutput Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveAvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveAvgPool3dOut(out Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveMaxPool2dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveMaxPool3dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AdaptiveMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdaptiveMaxPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Add(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdd(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Add1(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdd1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Add_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdd_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Add1_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAdd1_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AddOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { 
-if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addbmm_(batch1 Tensor, batch2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addcdiv(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addcdiv_(tensor1 Tensor, tensor2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AddcdivOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addcmul(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addcmul_(tensor1 Tensor, tensor2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AddcmulOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addmm(mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addmm_(mat1 Tensor, mat2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmm_(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) 
- if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addmv(mat Tensor, vec Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addmv_(mat Tensor, vec Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AddmvOut(out Tensor, mat Tensor, vec Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Addr_(vec1 Tensor, vec2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AddrOut(out Tensor, vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func AffineGridGenerator(theta Tensor, size []int64, alignCorners bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, len(size), calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func AffineGridGeneratorBackward(grad Tensor, size []int64, alignCorners bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, len(size), calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Alias(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAlias(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - 
retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AlignAs(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) All(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAll(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) All1(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgAll1(ptr, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AllOut(out Tensor, dim int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func AlphaDropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AlphaDropout_(p float64, train bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Angle(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAngle(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AngleOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Any(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAny(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Any1(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgAny1(ptr, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AnyOut(out Tensor, 
dim int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Arange(end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Arange1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArange1(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Arange2(start Scalar, end Scalar, step Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArange2(ptr, start.cscalar, end.cscalar, step.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ArangeOut(out Tensor, end Scalar)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeOut(ptr, out.ctensor, end.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ArangeOut1(out Tensor, start Scalar, end Scalar)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeOut1(ptr, out.ctensor, start.cscalar, end.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Argmax(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgArgmax(ptr, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Argmin(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgArgmin(ptr, ts.ctensor, dim, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Argsort(dim int64, descending bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cdescending := int32(0) - if descending { cdescending = int32(1) } -lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AsStrided(size []int64, stride []int64, storageOffset int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - 
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AsStrided_(size []int64, stride []int64, storageOffset int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Asin(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsin(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Asin_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsin_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AsinOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Atan(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Atan2(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan2(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Atan2_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Atan2Out(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Atan_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtan_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) AtanOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } 
-lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool2dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool2dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if 
countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool3dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) AvgPool3dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -ccountIncludePad := int32(0) - if countIncludePad { ccountIncludePad = int32(1) } -lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Baddbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Baddbmm_(batch1 Tensor, batch2 Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BaddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = 
Tensor{ctensor: *ptr} - - return retVal, err -} - -func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func BartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cperiodic := int32(0) - if periodic { cperiodic = int32(1) } -lib.AtgBartlettWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func BatchNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctraining := int32(0) - if training { ctraining = int32(1) } -ccudnnEnabled := int32(0) - if cudnnEnabled { ccudnnEnabled = int32(1) } -lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func BatchNormBackwardElemt(gradOut Tensor, input Tensor, mean Tensor, invstd Tensor, weight Tensor, meanDy Tensor, meanDyXmu Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func BatchNormElemt(input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func BatchNormElemtOut(out Tensor, input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Bernoulli(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBernoulli(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Bernoulli1(p float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBernoulli1(ptr, ts.ctensor, p) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - 
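// A minimal usage sketch (not part of the auto-generated bindings): it shows how
// the value-receiver wrappers in this hunk are called and what the trailing
// `del` argument does. Only Bernoulli, Argsort and MustDrop are taken from the
// wrappers above; the helper name demoDelFlag, its signature, and the error
// handling are assumptions for illustration only.
func demoDelFlag(x Tensor) (Tensor, error) {
	// del=false keeps the receiver alive, so x can still be used afterwards.
	probs, err := x.Bernoulli(false)
	if err != nil {
		return Tensor{}, err
	}
	// del=true makes the generated wrapper run `defer ts.MustDrop()` on its
	// receiver, so the C tensor behind probs is freed once Argsort returns.
	sorted, err := probs.Argsort(0, true, true)
	if err != nil {
		return Tensor{}, err
	}
	return sorted, nil
}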
-func(ts Tensor) Bernoulli_(p Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Bernoulli1_(p float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBernoulli1_(ptr, ts.ctensor, p) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BernoulliOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBernoulliOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Bilinear(input1 Tensor, input2 Tensor, weight Tensor, bias Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BinaryCrossEntropy(target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BinaryCrossEntropyBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BinaryCrossEntropyBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBinaryCrossEntropyBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BinaryCrossEntropyOut(out Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BinaryCrossEntropyWithLogits(target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BinaryCrossEntropyWithLogitsBackward(gradOutput 
Tensor, target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBinaryCrossEntropyWithLogitsBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Bincount(weights Tensor, minlength int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseAnd(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseAnd1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseAnd1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseAnd_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BitwiseAnd1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseAnd1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BitwiseAndOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseAndOut1(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseAndOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseNot(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseNot(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseNot_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseNot_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BitwiseNotOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = 
Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseOr(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseOr1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseOr1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseOr_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BitwiseOr1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseOr1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BitwiseOrOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseOrOut1(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseOrOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseXor(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseXor1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseXor1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseXor_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BitwiseXor1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseXor1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) BitwiseXorOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BitwiseXorOut1(out Tensor, other Scalar, del bool)(retVal 
Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBitwiseXorOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func BlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cperiodic := int32(0) - if periodic { cperiodic = int32(1) } -lib.AtgBlackmanWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Bmm(mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) BmmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CartesianProd(tensors []Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.AtgCartesianProd(ptr, ctensors, len(ctensors)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Cat(tensors []Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.AtgCat(ptr, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cauchy_(median float64, sigma float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCauchy_(ptr, ts.ctensor, median, sigma) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func Cdist(x1 Tensor, x2 Tensor, p float64, computeMode int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, computeMode) - if err = TorchErr(); err != nil { - return retVal, err - } 
- retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ceil(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCeil(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ceil_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCeil_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) CeilOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Celu(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCelu(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Celu_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCelu_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func ChainMatmul(matrices []Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cmatrices []lib.Ctensor - for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)} -lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cholesky(upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.AtgCholesky(ptr, ts.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CholeskyInverse(upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CholeskyInverseOut(out Tensor, upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CholeskyOut(out Tensor, upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CholeskySolve(input2 Tensor, upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CholeskySolveOut(out Tensor, input2 Tensor, upper bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cupper := int32(0) - if upper { cupper = int32(1) } -lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Clamp(min Scalar, max Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Clamp_(min Scalar, max Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ClampMax(max Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClampMax(ptr, ts.ctensor, max.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ClampMax_(max Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ClampMaxOut(out Tensor, max Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ClampMin(min Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClampMin(ptr, ts.ctensor, min.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ClampMin_(min Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ClampMinOut(out Tensor, min Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ClampOut(out Tensor, min Scalar, max Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClampOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - 
return retVal, err -} - -func(ts Tensor) Coalesce(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCoalesce(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Col2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCol2im(ptr, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Col2imBackward(gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCol2imBackward(ptr, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Col2imBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCol2imBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Col2imOut(out Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Combinations(r int64, withReplacement bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cwithReplacement := int32(0) - if withReplacement { cwithReplacement = int32(1) } -lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Conj(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConj(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ConjOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConjOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ConstantPadNd(pad []int64, del bool)(retVal Tensor, err error) { -if del { 
defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConstantPadNd(ptr, ts.ctensor, pad, len(pad)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Contiguous(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgContiguous(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Conv1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Conv2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Conv3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ConvTbc(weight Tensor, bias Tensor, pad int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ConvTranspose1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ConvTranspose2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ConvTranspose3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding 
[]int64, groups int64, dilation []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Convolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctransposed := int32(0) - if transposed { ctransposed = int32(1) } -lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ConvolutionOverrideable(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctransposed := int32(0) - if transposed { ctransposed = int32(1) } -lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CopySparseToSparse_(src Tensor, nonBlocking bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Cos(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCos(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cos_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCos_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) CosOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCosOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cosh(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCosh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cosh_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCosh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) CoshOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CosineEmbeddingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CosineSimilarity(x1 Tensor, x2 Tensor, dim int64, eps float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cross(other Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCross(ptr, ts.ctensor, other.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CrossOut(out Tensor, other Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CtcLoss(logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - czeroInfinity := int32(0) - if zeroInfinity { czeroInfinity = int32(1) } -lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, reduction, czeroInfinity) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CtcLoss1(logProbs Tensor, targets Tensor, inputLengths Tensor, targetLengths Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - czeroInfinity := int32(0) - if zeroInfinity { czeroInfinity = int32(1) } -lib.AtgCtcLoss1(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CudnnAffineGridGenerator(theta Tensor, n int64, c int64, h int64, w int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CudnnAffineGridGeneratorBackward(grad Tensor, n int64, c int64, h int64, w int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts 
Tensor) CudnnConvolution(weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CudnnConvolution1(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolution1(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CudnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CudnnConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CudnnConvolutionTranspose(weight Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, 
len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CudnnConvolutionTranspose1(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolutionTranspose1(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func CudnnConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgCudnnConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CudnnGridSampler(grid Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cumprod(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CumprodOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCumprodOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, 
err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Cumsum(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) CumsumOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Data(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgData(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Dequantize(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDequantize(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Det(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDet(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Detach(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDetach(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Detach_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDetach_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Diag(diagonal int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiag(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) DiagOut(out Tensor, diagonal int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Diagflat(offset int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiagflat(ptr, ts.ctensor, offset) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = 
Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Digamma(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDigamma(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Digamma_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDigamma_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) DigammaOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Dist(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDist(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Div(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiv(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Div1(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiv1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Div_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiv_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Div1_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDiv1_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) DivOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Dot(tensor Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDot(ptr, ts.ctensor, tensor.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) DotOut(out Tensor, tensor Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Dropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgDropout(ptr, input.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Dropout_(p float64, train bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgDropout_(ptr, ts.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func Einsum(equation string, tensors []Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.AtgEinsum(ptr, equation, ctensors, len(ctensors)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Elu(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgElu(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Elu_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgElu_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func EluBackward(gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func EluBackwardOut(gradInput Tensor, gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEluBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) EluOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEluOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Embedding(weight Tensor, indices Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { cscaleGradByFreq = int32(1) } -csparse := int32(0) - if sparse { csparse = int32(1) } -lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func EmbeddingBackward(grad 
Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { cscaleGradByFreq = int32(1) } -csparse := int32(0) - if sparse { csparse = int32(1) } -lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func EmbeddingDenseBackward(gradOutput Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { cscaleGradByFreq = int32(1) } -lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) EmbeddingRenorm_(indices Tensor, maxNorm float64, normType float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func EmbeddingSparseBackward(grad Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cscaleGradByFreq := int32(0) - if scaleGradByFreq { cscaleGradByFreq = int32(1) } -lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmpty(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) EmptyLike(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmptyLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func EmptyOut(out Tensor, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmptyOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEmptyStrided(ptr, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Eq(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEq(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - 
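// NOTE (editor's illustrative sketch, not part of the generated bindings):
// every wrapper in this file repeats the same shape -- allocate a Ctensor
// slot, call the matching lib.Atg* binding, check TorchErr(), and wrap the
// result in Tensor{ctensor: *ptr}. The short program below shows how a
// caller might drive these wrappers, including the trailing `del` flag that
// defers MustDrop() on the receiver. The import paths and the gotch.Float /
// gotch.CPU names are assumptions about the package layout, not something
// stated in this patch.
package main

import (
	"log"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	// Two 3x3 identity matrices from the generated factory wrapper Eye.
	a, err := tensor.Eye(3, gotch.Float, gotch.CPU)
	if err != nil {
		log.Fatal(err)
	}
	b, err := tensor.Eye(3, gotch.Float, gotch.CPU)
	if err != nil {
		log.Fatal(err)
	}

	// Element-wise comparison against another tensor (Eq1). Passing
	// del=true makes the wrapper defer a.MustDrop(), releasing the
	// receiver's C memory as soon as the call returns -- the ownership
	// pattern used by every method in this file.
	eq, err := a.Eq1(b, true)
	if err != nil {
		log.Fatal(err)
	}

	// In-place variants carry a trailing underscore and return only an
	// error, mutating the receiver instead of allocating a result.
	if err := b.Floor_(); err != nil {
		log.Fatal(err)
	}

	// Tensors we still own are freed explicitly.
	eq.MustDrop()
	b.MustDrop()
}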
retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Eq1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEq1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Eq_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEq_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Eq1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEq1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) EqOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEqOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) EqOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEqOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Erf(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErf(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Erf_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErf_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ErfOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Erfc(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfc(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Erfc_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfc_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ErfcOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Erfinv(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfinv(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Erfinv_()(err error) { - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfinv_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ErfinvOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Exp(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExp(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Exp_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExp_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ExpOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Expand(size []int64, implicit bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cimplicit := int32(0) - if implicit { cimplicit = int32(1) } -lib.AtgExpand(ptr, ts.ctensor, size, len(size), cimplicit) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ExpandAs(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Expm1(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpm1(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Expm1_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpm1_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Expm1Out(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExpm1Out(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Exponential_(lambd float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgExponential_(ptr, ts.ctensor, lambd) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Eye1(n int64, m int64, optionsKind gotch.DType, optionsDevice 
gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEye1(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func EyeOut(out Tensor, n int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEyeOut(ptr, out.ctensor, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func EyeOut1(out Tensor, n int64, m int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgEyeOut1(ptr, out.ctensor, n, m) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FakeQuantizePerChannelAffine(scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FakeQuantizePerChannelAffineBackward(grad Tensor, scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerChannelAffineBackward(ptr, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FakeQuantizePerTensorAffineBackward(grad Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFakeQuantizePerTensorAffineBackward(ptr, grad.ctensor, ts.ctensor, scale, zeroPoint, quantMin, quantMax) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmLinearFp16Weight(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmLinearFp16WeightFp32Activation(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return 
retVal, err -} - -func FbgemmLinearInt8Weight(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmLinearInt8WeightFp32Activation(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmPackGemmMatrixFp16(input Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmPackQuantizedMatrix(input Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FbgemmPackQuantizedMatrix1(input Tensor, k int64, n int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFbgemmPackQuantizedMatrix1(ptr, input.ctensor, k, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FeatureAlphaDropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FeatureAlphaDropout_(p float64, train bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func FeatureDropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FeatureDropout_(p float64, train bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctrain := int32(0) - if train { ctrain = int32(1) } -lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Fft(signalNdim int64, normalized bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnormalized := int32(0) - if normalized { cnormalized = int32(1) } -lib.AtgFft(ptr, ts.ctensor, signalNdim, cnormalized) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Fill_(value Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFill_(ptr, ts.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Fill1_(value Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFill1_(ptr, ts.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) FillDiagonal_(fillValue Scalar, wrap bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cwrap := int32(0) - if wrap { cwrap = int32(1) } -lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Flatten(startDim int64, endDim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Flip(dims []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFlip(ptr, ts.ctensor, dims, len(dims)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Floor(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloor(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Floor_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloor_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) FloorDivide(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FloorDivide1(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivide1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FloorDivide_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) FloorDivide1_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivide1_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) FloorDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FloorOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Fmod(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmod(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Fmod1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmod1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Fmod_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmod_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Fmod1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmod1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) FmodOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmodOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FmodOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFmodOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Frac(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFrac(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Frac_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFrac_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) FracOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFracOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FractionalMaxPool2dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, 
kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FractionalMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FractionalMaxPool3dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FractionalMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFractionalMaxPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FrobeniusNorm(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFrobeniusNorm(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FrobeniusNorm1(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgFrobeniusNorm1(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FrobeniusNormOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cshared := int32(0) - if shared { cshared = int32(1) } -lib.AtgFromFile(ptr, filename, cshared, size, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Full(size []int64, fillValue Scalar, 
optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFull(ptr, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) FullLike(fillValue Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func FullOut(out Tensor, size []int64, fillValue Scalar)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgFullOut(ptr, out.ctensor, size, len(size), fillValue.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Gather(dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csparseGrad := int32(0) - if sparseGrad { csparseGrad = int32(1) } -lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GatherOut(out Tensor, dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csparseGrad := int32(0) - if sparseGrad { csparseGrad = int32(1) } -lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ge(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGe(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ge1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGe1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ge_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGe_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Ge1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGe1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) GeOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GeOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Gelu(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGelu(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GeluBackward(grad Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGeluBackward(ptr, grad.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Geometric_(p float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGeometric_(ptr, ts.ctensor, p) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Ger(vec2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGer(ptr, ts.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GerOut(out Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Glu(dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGlu(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GluBackward(gradOutput Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GluBackwardOut(gradInput Tensor, gradOutput Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGluBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GluOut(out Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Grad(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGrad(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func GridSampler(input Tensor, grid 
Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func GridSampler2d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func GridSampler3d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func GroupNorm(input Tensor, numGroups int64, weight Tensor, bias Tensor, eps float64, cudnnEnabled bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ccudnnEnabled := int32(0) - if cudnnEnabled { ccudnnEnabled = int32(1) } -lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func GruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Gt(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGt(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Gt1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGt1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Gt_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGt_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Gt1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGt1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) GtOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGtOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) GtOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgGtOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func HammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cperiodic := int32(0) - if periodic { cperiodic = int32(1) } -lib.AtgHammingWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func HammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cperiodic := int32(0) - if periodic { cperiodic = int32(1) } -lib.AtgHammingWindow2(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func HammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cperiodic := int32(0) - if periodic { cperiodic = int32(1) } -lib.AtgHammingWindow3(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func HannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cperiodic := int32(0) - if periodic { cperiodic = int32(1) } -lib.AtgHannWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Hardshrink(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardshrink(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = 
Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) HardshrinkBackward(gradOut Tensor, lambd Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Hardsigmoid(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardsigmoid(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Hardsigmoid_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardsigmoid_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) HardsigmoidBackward(gradOutput Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) HardsigmoidOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Hardtanh(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardtanh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Hardtanh_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardtanh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) HardtanhBackward(gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) HardtanhBackwardOut(gradInput Tensor, gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardtanhBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) HardtanhOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) HingeEmbeddingLoss(target Tensor, margin float64, reduction int64, del bool)(retVal Tensor, err error) { -if 
del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Histc(bins int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHistc(ptr, ts.ctensor, bins) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) HistcOut(out Tensor, bins int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Hspmm(mat1 Tensor, mat2 Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func HspmmOut(out Tensor, mat1 Tensor, mat2 Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ifft(signalNdim int64, normalized bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnormalized := int32(0) - if normalized { cnormalized = int32(1) } -lib.AtgIfft(ptr, ts.ctensor, signalNdim, cnormalized) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Im2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIm2col(ptr, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Im2colBackward(gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIm2colBackward(ptr, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Im2colBackwardOut(gradInput Tensor, gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIm2colBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) 
Im2colOut(out Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Imag(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgImag(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Index(indices []Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cindices []lib.Ctensor - for _, t := range indices {cindices = append(cindices, t.ctensor)} -lib.AtgIndex(ptr, ts.ctensor, cindices, len(cindices)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IndexAdd(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IndexAdd_(dim int64, index Tensor, source Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) IndexCopy(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IndexCopy_(dim int64, index Tensor, source Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) IndexFill(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IndexFill1(dim int64, index Tensor, value Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexFill1(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IndexFill_(dim int64, index Tensor, value Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) - if err = 
TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) IndexFill1_(dim int64, index Tensor, value Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexFill1_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) IndexPut(indices []Tensor, values Tensor, accumulate bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cindices []lib.Ctensor - for _, t := range indices {cindices = append(cindices, t.ctensor)} -caccumulate := int32(0) - if accumulate { caccumulate = int32(1) } -lib.AtgIndexPut(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IndexPut_(indices []Tensor, values Tensor, accumulate bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var cindices []lib.Ctensor - for _, t := range indices {cindices = append(cindices, t.ctensor)} -caccumulate := int32(0) - if accumulate { caccumulate = int32(1) } -lib.AtgIndexPut_(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) IndexSelect(dim int64, index Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IndexSelectOut(out Tensor, dim int64, index Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Indices(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIndices(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func InstanceNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cuseInputStats := int32(0) - if useInputStats { cuseInputStats = int32(1) } -ccudnnEnabled := int32(0) - if cudnnEnabled { ccudnnEnabled = int32(1) } -lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) IntRepr(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIntRepr(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Inverse(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgInverse(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) InverseOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Irfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnormalized := int32(0) - if normalized { cnormalized = int32(1) } -conesided := int32(0) - if onesided { conesided = int32(1) } -lib.AtgIrfft(ptr, ts.ctensor, signalNdim, cnormalized, conesided, signalSizes, len(signalSizes)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Isclose(other Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cequalNan := int32(0) - if equalNan { cequalNan = int32(1) } -lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Isfinite(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIsfinite(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Isinf(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIsinf(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Isnan(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgIsnan(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) KlDiv(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) KlDivBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) L1Loss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, 
err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) L1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) L1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) L1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func LayerNorm(input Tensor, normalizedShape []int64, weight Tensor, bias Tensor, eps float64, cudnnEnable bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ccudnnEnable := int32(0) - if cudnnEnable { ccudnnEnable = int32(1) } -lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps, ccudnnEnable) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Le(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLe(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Le1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLe1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Le_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLe_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Le1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLe1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LeOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LeOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) - if 
err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LeakyRelu(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLeakyRelu(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LeakyRelu_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLeakyRelu_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LeakyReluBackward(gradOutput Tensor, negativeSlope Scalar, selfIsResult bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cselfIsResult := int32(0) - if selfIsResult { cselfIsResult = int32(1) } -lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LeakyReluOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lerp(end Tensor, weight Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lerp1(end Tensor, weight Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLerp1(ptr, ts.ctensor, end.ctensor, weight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lerp_(end Tensor, weight Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Lerp1_(end Tensor, weight Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLerp1_(ptr, ts.ctensor, end.ctensor, weight.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LerpOut(out Tensor, end Tensor, weight Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLerpOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LerpOut1(out Tensor, end Tensor, weight Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLerpOut1(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lgamma(del bool)(retVal Tensor, err error) { 
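// NOTE (review sketch): every wrapper in this file follows the same generated shape:
// allocate a Ctensor slot, let the atg_* call fill it, check TorchErr(), then wrap the
// handle in a Go Tensor. The extra `del bool` parameter defers ts.MustDrop(), so the
// receiver is freed right after the C call returns, which is what keeps long method
// chains from leaking C memory. A minimal, hypothetical caller-side sketch (the
// constructor name is assumed, not part of this diff):
//
//	x, err := OfSlice([]float64{0.5, 1.5, 2.5}) // assumed tensor constructor
//	if err != nil {
//		panic(err)
//	}
//	y, err := x.Lgamma(true) // true: x is dropped once the C call returns
//	if err != nil {
//		panic(err)
//	}
//	defer y.MustDrop()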
-if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLgamma(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lgamma_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLgamma_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LgammaOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Linear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Linspace(start Scalar, end Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func LinspaceOut(out Tensor, start Scalar, end Scalar, steps int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log10(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog10(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log10_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog10_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Log10Out(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log1p(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog1p(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log1p_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog1p_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Log1pOut(out Tensor, del bool)(retVal 
Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log2(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog2(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log2_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog2_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Log2Out(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Log_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLog_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LogNormal_(mean float64, std float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogNormal_(ptr, ts.ctensor, mean, std) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LogOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogSigmoid(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogSigmoid(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogSigmoidBackward(gradOutput Tensor, buffer Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogSigmoidBackwardOut(gradInput Tensor, gradOutput Tensor, buffer Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogSigmoidBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogSigmoidOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
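// NOTE (review sketch): methods ending in an underscore (Log_, Log1p_, LogNormal_, ...)
// are the in-place variants: they mutate the receiver through the C call and return only
// an error, so there is no retVal to wrap and no `del` flag. Assumed usage:
//
//	if err := x.Log_(); err != nil { // x now holds log(x), element-wise
//		panic(err)
//	}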
lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Logdet(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogdet(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogicalAnd(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogicalAnd_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LogicalAndOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogicalNot(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalNot(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogicalNot_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalNot_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LogicalNotOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogicalOr(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogicalOr_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LogicalOrOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogicalXor(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - 
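// NOTE (review sketch): the *Out variants (LogicalAndOut, LogicalOrOut, LogSigmoidOut, ...)
// write into a caller-supplied destination: `out.ctensor` is forwarded as the first tensor
// argument of the atg_* call, mirroring libtorch's out= overloads. Hypothetical usage,
// assuming `out` is a pre-allocated tensor of the right shape and dtype:
//
//	res, err := x.LogicalAndOut(out, y, false) // result is written into out
//	if err != nil {
//		panic(err)
//	}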
- return retVal, err -} - -func(ts Tensor) LogicalXor_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LogicalXorOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Logspace(start Scalar, end Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func LogspaceOut(out Tensor, start Scalar, end Scalar, steps int64, base float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Logsumexp(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LogsumexpOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lt(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLt(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lt1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLt1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Lt_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLt_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Lt1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLt1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) LtOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLtOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LtOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLtOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LuSolve(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) LuSolveOut(out Tensor, lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func MarginRankingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaskedFill(mask Tensor, value Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaskedFill1(mask Tensor, value Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFill1(ptr, ts.ctensor, mask.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaskedFill_(mask Tensor, value Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) MaskedFill1_(mask Tensor, value Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedFill1_(ptr, ts.ctensor, mask.ctensor, value.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) MaskedScatter(mask Tensor, source Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaskedScatter_(mask Tensor, source Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
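// NOTE (review sketch): this C API has no bool, so every Go bool flag is lowered to an
// int32 before the cgo call -- the c-prefixed locals (ckeepdim, csymmetric, creplacement,
// ...) scattered through these wrappers. The repeated three lines are equivalent to a tiny
// helper like the following (a sketch only; no such helper exists in the generated file):
//
//	func cbool(b bool) int32 {
//		if b {
//			return 1
//		}
//		return 0
//	}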
lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) MaskedSelect(mask Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaskedSelectOut(out Tensor, mask Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Matmul(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatmul(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MatmulOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MatrixPower(n int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMatrixPower(ptr, ts.ctensor, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MatrixRank(symmetric bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csymmetric := int32(0) - if symmetric { csymmetric = int32(1) } -lib.AtgMatrixRank(ptr, ts.ctensor, csymmetric) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MatrixRank1(tol float64, symmetric bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - csymmetric := int32(0) - if symmetric { csymmetric = int32(1) } -lib.AtgMatrixRank1(ptr, ts.ctensor, tol, csymmetric) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Max(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMax(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Max1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMax1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxOut(ptr, out.ctensor, 
ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxPool2dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxPool2dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMaxPool2dWithIndicesBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxPool3dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := 
int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxPool3dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMaxPool3dWithIndicesBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool2d(indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool2dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool2dOut(out Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool3d(indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool3dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxUnpool3dOut(out Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MaxValues(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgMaxValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mean(dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMean(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mean1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgMean1(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MeanOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgMeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Median(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMedian(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Min(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMin(ptr, ts.ctensor) 
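// NOTE (review sketch): []int64 arguments (dims, kernel sizes, strides, paddings) are
// handed to libtch as a (slice, length) pair -- e.g. `dim, len(dim)` -- because the
// underlying C function takes a raw int64 pointer plus an explicit element count.
// Assumed usage of one of the reductions below:
//
//	m, err := x.MinValues([]int64{0, 1}, true, false) // keepdim=true, keep x alive (del=false)
//	if err != nil {
//		panic(err)
//	}
//	defer m.MustDrop()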
- if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Min1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMin1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MinOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MinValues(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgMinValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MiopenConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenConvolutionBackwardBias(gradOutput Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMiopenConvolutionBackwardBias(ptr, gradOutput.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MiopenConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenConvolutionBackwardWeight(ptr, weightSize, 
len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MiopenConvolutionTranspose(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MiopenDepthwiseConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func MiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, 
deterministic bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenDepthwiseConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { cbenchmark = int32(1) } -cdeterministic := int32(0) - if deterministic { cdeterministic = int32(1) } -lib.AtgMiopenDepthwiseConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MkldnnConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func MkldnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbiasDefined := int32(0) - if biasDefined { cbiasDefined = int32(1) } -lib.AtgMkldnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbiasDefined) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func MkldnnLinear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMkldnnLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := 
int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMkldnnReorderConv2dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mm(mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMm(ptr, ts.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MseLoss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MseLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MseLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMseLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MseLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mul(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMul(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mul1(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer 
ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMul1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mul_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMul_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Mul1_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMul1_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) MulOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MultiMarginLossBackward(gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MultiMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMultiMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MultilabelMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MultilabelMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MultilabelMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMultilabelMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) 
MultilabelMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Multinomial(numSamples int64, replacement bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - creplacement := int32(0) - if replacement { creplacement = int32(1) } -lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MultinomialOut(out Tensor, numSamples int64, replacement bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - creplacement := int32(0) - if replacement { creplacement = int32(1) } -lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mv(vec Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMv(ptr, ts.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) MvOut(out Tensor, vec Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mvlgamma(p int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMvlgamma(ptr, ts.ctensor, p) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Mvlgamma_(p int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMvlgamma_(ptr, ts.ctensor, p) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Narrow(dim int64, start int64, length int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNarrow(ptr, ts.ctensor, dim, start, length) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Narrow1(dim int64, start Tensor, length int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNarrow1(ptr, ts.ctensor, dim, start.ctensor, length) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NarrowCopy(dim int64, start int64, length int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, length) - if err = TorchErr(); err != 
nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NativeNorm(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNativeNorm(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ne(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNe(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ne1(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNe1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ne_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNe_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Ne1_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNe1_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) NeOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NeOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Neg(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNeg(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Neg_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNeg_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) NegOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNegOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNewEmpty(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NewFull(size []int64, fillValue Scalar, optionsKind gotch.DType, 
optionsDevice gotch.Device, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNewFull(ptr, ts.ctensor, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNewZeros(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLoss(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLoss2d(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLoss2dBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLoss2dBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLoss2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLoss2dOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLossBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLossBackward(ptr, 
gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NllLossOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Nonzero(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNonzero(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NonzeroOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Norm(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNorm(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Norm1(p Scalar, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNorm1(ptr, ts.ctensor, p.cscalar, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Norm2(p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNorm2(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Norm3(p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNorm3(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func NormExceptDim(v Tensor, pow int64, dim int64)(retVal Tensor, err error) { - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NormOut(out Tensor, p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NormOut1(out Tensor, p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNormOut1(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Normal_(mean float64, std float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNormal_(ptr, ts.ctensor, mean, std) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func NormalOut(out Tensor, mean Tensor, std float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNormalOut(ptr, out.ctensor, mean.ctensor, std) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func NormalOut1(out Tensor, mean float64, std Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNormalOut1(ptr, out.ctensor, mean, std.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func NormalOut2(out Tensor, mean Tensor, std Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNormalOut2(ptr, out.ctensor, mean.ctensor, std.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func NormalOut3(out Tensor, mean float64, std float64, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNormalOut3(ptr, out.ctensor, mean, std, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NuclearNorm(keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NuclearNorm1(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNuclearNorm1(ptr, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) 
NuclearNormOut(out Tensor, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NuclearNormOut1(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgNuclearNormOut1(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) NumpyT(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgNumpyT(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) OneHot(numClasses int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgOneHot(ptr, ts.ctensor, numClasses) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgOnes(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) OnesLike(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgOnesLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func OnesOut(out Tensor, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgOnesOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Orgqr(input2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) OrgqrOut(out Tensor, input2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Ormqr(input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cleft := int32(0) - if left { cleft = int32(1) } -ctranspose := int32(0) - if transpose { ctranspose = int32(1) } -lib.AtgOrmqr(ptr, ts.ctensor, input2.ctensor, input3.ctensor, 
cleft, ctranspose) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) OrmqrOut(out Tensor, input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cleft := int32(0) - if left { cleft = int32(1) } -ctranspose := int32(0) - if transpose { ctranspose = int32(1) } -lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func PairwiseDistance(x1 Tensor, x2 Tensor, p float64, eps float64, keepdim bool)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Pdist(p float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPdist(ptr, ts.ctensor, p) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Permute(dims []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPermute(ptr, ts.ctensor, dims, len(dims)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) PinMemory(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPinMemory(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Pinverse(rcond float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPinverse(ptr, ts.ctensor, rcond) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) PixelShuffle(upscaleFactor int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Poisson(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPoisson(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func PoissonNllLoss(input Tensor, target Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - clogInput := int32(0) - if logInput { clogInput = int32(1) } -cfull := int32(0) - if full { cfull = int32(1) } -lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: 
*ptr} - - return retVal, err -} - -func(ts Tensor) Polygamma(n int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPolygamma(ptr, n, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Polygamma_(n int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPolygamma_(ptr, ts.ctensor, n) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) PolygammaOut(out Tensor, n int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Pow(exponent Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPow(ptr, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Pow1(exponent Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPow1(ptr, ts.ctensor, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Pow2(selfScalar Scalar, exponent Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPow2(ptr, selfScalar.cscalar, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Pow_(exponent Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Pow1_(exponent Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPow1_(ptr, ts.ctensor, exponent.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) PowOut(out Tensor, exponent Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPowOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) PowOut1(out Tensor, exponent Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPowOut1(ptr, out.ctensor, ts.ctensor, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func PowOut2(out Tensor, selfScalar Scalar, exponent Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPowOut2(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Prelu(weight Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Prod(dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgProd(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Prod1(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgProd1(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ProdOut(out Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgProdOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Put_(index Tensor, source Tensor, accumulate bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - caccumulate := int32(0) - if accumulate { caccumulate = int32(1) } -lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) QPerChannelScales(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQPerChannelScales(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) QPerChannelZeroPoints(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) QuantizePerChannel(scales Tensor, zeroPoints Tensor, axis int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedBatchNorm(input Tensor, weight Tensor, bias Tensor, mean Tensor, vari Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - 
lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedGruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cceilMode := int32(0) - if ceilMode { cceilMode = int32(1) } -lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedRnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func QuantizedRnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRand(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RandLike(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandLike(ptr, ts.ctensor) 
- if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandOut(out Tensor, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandint(ptr, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Randint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandint1(ptr, low, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RandintLike(high int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintLike(ptr, ts.ctensor, high) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RandintLike1(low int64, high int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintLike1(ptr, ts.ctensor, low, high) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandintOut(out Tensor, high int64, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintOut(ptr, out.ctensor, high, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandintOut1(out Tensor, low int64, high int64, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandintOut1(ptr, out.ctensor, low, high, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandn(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RandnLike(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandnLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandnOut(out Tensor, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandnOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Random_()(err 
error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandom_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Random1_(to int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandom1_(ptr, ts.ctensor, to) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Random2(from int64, to int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandom2(ptr, ts.ctensor, from, to) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RandpermOut(out Tensor, n int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandpermOut(ptr, out.ctensor, n) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Range(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRange(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Range1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRange1(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RangeOut(out Tensor, start Scalar, end Scalar)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Real(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReal(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Reciprocal(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReciprocal(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Reciprocal_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReciprocal_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ReciprocalOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) 
ReflectionPad1d(padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReflectionPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReflectionPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReflectionPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReflectionPad2d(padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReflectionPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReflectionPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReflectionPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Relu(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRelu(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - 
-func(ts Tensor) Relu_()(err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRelu_(ptr, ts.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
-
- return err
-}
-
-func(ts Tensor) Remainder(other Scalar, del bool)(retVal Tensor, err error) {
-if del { defer ts.MustDrop() }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRemainder(ptr, ts.ctensor, other.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func(ts Tensor) Remainder1(other Tensor, del bool)(retVal Tensor, err error) {
-if del { defer ts.MustDrop() }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRemainder1(ptr, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func(ts Tensor) Remainder_(other Scalar)(err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar)
- if err = TorchErr(); err != nil {
- return err
- }
-
- return err
-}
-
-func(ts Tensor) Remainder1_(other Tensor)(err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRemainder1_(ptr, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return err
- }
-
- return err
-}
-
-func(ts Tensor) RemainderOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) {
-if del { defer ts.MustDrop() }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRemainderOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func(ts Tensor) RemainderOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) {
-if del { defer ts.MustDrop() }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRemainderOut1(ptr, out.ctensor, ts.ctensor, other.ctensor)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func(ts Tensor) Renorm(p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor, err error) {
-if del { defer ts.MustDrop() }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func(ts Tensor) Renorm_(p Scalar, dim int64, maxnorm Scalar)(err error) {
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
- if err = TorchErr(); err != nil {
- return err
- }
-
- return err
-}
-
-func(ts Tensor) RenormOut(out Tensor, p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor, err error) {
-if del { defer ts.MustDrop() }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func(ts Tensor) Repeat(repeats []int64, del bool)(retVal Tensor, err error) {
-if del { defer ts.MustDrop() }
- ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
-
- lib.AtgRepeat(ptr, ts.ctensor, repeats, len(repeats))
- if err = TorchErr(); err != nil {
- return retVal, err
- }
- retVal = Tensor{ctensor: *ptr}
-
- return retVal, err
-}
-
-func RepeatInterleave(repeats Tensor)(retVal Tensor, err error) {
- 
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRepeatInterleave(ptr, repeats.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RepeatInterleave1(repeats Tensor, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRepeatInterleave1(ptr, ts.ctensor, repeats.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RepeatInterleave2(repeats int64, dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRepeatInterleave2(ptr, ts.ctensor, repeats, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad1d(padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad2d(padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() 
} - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad3d(padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad3dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad3dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReplicationPad3dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RequiresGrad_(requiresGrad bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - crequiresGrad := int32(0) - if requiresGrad { crequiresGrad = int32(1) } -lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Reshape(shape []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReshape(ptr, ts.ctensor, shape, len(shape)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ReshapeAs(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Resize_(size []int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgResize_(ptr, ts.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return err - } - - return err 
-} - -func(ts Tensor) ResizeAs_(theTemplate Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Rfft(signalNdim int64, normalized bool, onesided bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnormalized := int32(0) - if normalized { cnormalized = int32(1) } -conesided := int32(0) - if onesided { conesided = int32(1) } -lib.AtgRfft(ptr, ts.ctensor, signalNdim, cnormalized, conesided) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func RnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Roll(shifts []int64, dims []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRoll(ptr, ts.ctensor, shifts, len(shifts), dims, len(dims)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Rot90(k int64, dims []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRot90(ptr, ts.ctensor, k, dims, len(dims)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Round(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRound(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Round_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRound_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) RoundOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Rrelu(training bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctraining := int32(0) - if training { ctraining = int32(1) } -lib.AtgRrelu(ptr, ts.ctensor, ctraining) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Rrelu_(training bool)(err 
error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctraining := int32(0) - if training { ctraining = int32(1) } -lib.AtgRrelu_(ptr, ts.ctensor, ctraining) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) RreluWithNoise(noise Tensor, training bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctraining := int32(0) - if training { ctraining = int32(1) } -lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RreluWithNoise_(noise Tensor, training bool)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctraining := int32(0) - if training { ctraining = int32(1) } -lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) RreluWithNoiseBackward(gradOutput Tensor, noise Tensor, lower Scalar, upper Scalar, training bool, selfIsResult bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctraining := int32(0) - if training { ctraining = int32(1) } -cselfIsResult := int32(0) - if selfIsResult { cselfIsResult = int32(1) } -lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) RreluWithNoiseOut(out Tensor, noise Tensor, training bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ctraining := int32(0) - if training { ctraining = int32(1) } -lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Rsqrt(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRsqrt(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Rsqrt_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRsqrt_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) RsqrtOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Rsub(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRsub(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Rsub1(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRsub1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - 
return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ScalarTensor(s Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Scatter(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Scatter1(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgScatter1(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Scatter_(dim int64, index Tensor, src Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Scatter1_(dim int64, index Tensor, value Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgScatter1_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ScatterAdd(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ScatterAdd_(dim int64, index Tensor, src Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Select(dim int64, index int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSelect(ptr, ts.ctensor, dim, index) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Selu(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSelu(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Selu_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSelu_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Set_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSet_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Set1_(source Tensor)(err error) { - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSet1_(ptr, ts.ctensor, source.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) SetRequiresGrad(r bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cr := int32(0) - if r { cr = int32(1) } -lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sigmoid(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSigmoid(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sigmoid_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSigmoid_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func SigmoidBackward(gradOutput Tensor, output Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func SigmoidBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSigmoidBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SigmoidOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sign(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSign(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sign_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSign_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) SignOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSignOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sin(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSin(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sin_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSin_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) SinOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSinOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sinh(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSinh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sinh_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSinh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) SinhOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Slice(dim int64, start int64, end int64, step int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlice(ptr, ts.ctensor, dim, start, end, step) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SlowConv3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SlowConv3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SlowConvDilated2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SlowConvDilated3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts 
Tensor) SlowConvTranspose2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConvTranspose2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SlowConvTranspose2dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SlowConvTranspose3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SlowConvTranspose3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Smm(mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SmoothL1Loss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SmoothL1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil 
{ - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SmoothL1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSmoothL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SmoothL1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Softmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Softplus(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftplus(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftplusBackward(gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - 
- lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftplusBackwardOut(gradInput Tensor, gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftplusBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftplusOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Softshrink(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftshrink(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftshrinkBackward(gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftshrinkBackwardOut(gradInput Tensor, gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftshrinkBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SoftshrinkOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSparseCooTensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func SparseCooTensor1(indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSparseCooTensor1(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func SparseCooTensor2(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSparseCooTensor2(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SparseMask(mask Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSparseResize_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Sqrt(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSqrt(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sqrt_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSqrt_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) SqrtOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Square(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSquare(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Square_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSquare_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Squeeze(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSqueeze(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Squeeze1(dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSqueeze1(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Squeeze_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSqueeze_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Squeeze1_(dim int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSqueeze1_(ptr, ts.ctensor, 
dim) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Sspaddmm(mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SspaddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Stack(tensors []Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.AtgStack(ptr, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func StackOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} -lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Std(unbiased bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -lib.AtgStd(ptr, ts.ctensor, cunbiased) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Std1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgStd1(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) StdOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Stft(nFft int64, hopLength int64, winLength int64, window Tensor, normalized bool, onesided bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnormalized := int32(0) - if normalized { cnormalized = int32(1) } -conesided := int32(0) - if onesided { conesided = int32(1) } -lib.AtgStft(ptr, ts.ctensor, nFft, hopLength, winLength, window.ctensor, cnormalized, conesided) - if err = TorchErr(); 
err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sub(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSub(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sub1(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSub1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sub_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSub_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Sub1_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSub1_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) SubOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sum(dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSum(ptr, ts.ctensor, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Sum1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgSum1(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SumOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgSumOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) SumToSize(size []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSumToSize(ptr, ts.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) T(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgT(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) T_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgT_(ptr, 
ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Take(index Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTake(ptr, ts.ctensor, index.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TakeOut(out Tensor, index Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Tan(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTan(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Tan_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTan_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) TanOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTanOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Tanh(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTanh(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Tanh_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTanh_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func TanhBackward(gradOutput Tensor, output Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func TanhBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTanhBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TanhOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Tensordot(other Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts 
Tensor) Threshold(threshold Scalar, value Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Threshold_(threshold Scalar, value Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) ThresholdBackward(gradOutput Tensor, threshold Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ThresholdOut(out Tensor, threshold Scalar, value Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) To(device gotch.Device, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTo(ptr, ts.ctensor, device.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) To1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -ccopy := int32(0) - if copy { ccopy = int32(1) } -lib.AtgTo1(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) To2(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -ccopy := int32(0) - if copy { ccopy = int32(1) } -lib.AtgTo2(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) To3(other Tensor, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -ccopy := int32(0) - if copy { ccopy = int32(1) } -lib.AtgTo3(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) To4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cnonBlocking := int32(0) - if nonBlocking { cnonBlocking = int32(1) } -ccopy := int32(0) - if copy { ccopy = int32(1) } -lib.AtgTo4(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ToDense(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToDense(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ToDenseBackward(grad Tensor, input Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ToMkldnn(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToMkldnn(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ToMkldnnBackward(grad Tensor, input Tensor)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ToSparse(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToSparse(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ToSparse1(sparseDim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgToSparse1(ptr, ts.ctensor, sparseDim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Totype(scalarType gotch.DType, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Trace(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrace(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Transpose(dim0 int64, dim1 int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Transpose_(dim0 int64, dim1 int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func Trapz(y Tensor, x Tensor, dim int64)(retVal Tensor, 
err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func Trapz1(y Tensor, dx float64, dim int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrapz1(ptr, y.ctensor, dx, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Tril(diagonal int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTril(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Tril_(diagonal int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTril_(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TrilOut(out Tensor, diagonal int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func TripletMarginLoss(anchor Tensor, positive Tensor, negative Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cswap := int32(0) - if swap { cswap = int32(1) } -lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Triu(diagonal int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriu(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Triu_(diagonal int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriu_(ptr, ts.ctensor, diagonal) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TriuOut(out Tensor, diagonal int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal) - if err = TorchErr(); err 
!= nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TrueDivide(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TrueDivide1(other Scalar, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivide1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TrueDivide_(other Tensor)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) TrueDivide1_(other Scalar)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivide1_(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) TrueDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Trunc(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrunc(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Trunc_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTrunc_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) TruncOut(out Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) TypeAs(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Unfold(dimension int64, size int64, step int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Uniform_(from float64, to float64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUniform_(ptr, ts.ctensor, from, to) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) Unsqueeze(dim int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnsqueeze(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Unsqueeze_(dim int64)(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUnsqueeze_(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func(ts Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBicubic2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBicubic2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleBicubic2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleBicubic2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBilinear2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) 
} -lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleBilinear2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleBilinear2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleBilinear2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleLinear1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleLinear1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleLinear1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleLinear1dOut(out Tensor, outputSize []int64, alignCorners bool, scales float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) - if err = TorchErr(); err != nil { - return retVal, 
err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleNearest1d(outputSize []int64, scales float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, len(outputSize), scales) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleNearest1dOut(out Tensor, outputSize []int64, scales float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scales) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleNearest2dOut(out Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) - if err = 
TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleNearest3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleNearest3dOut(out Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleTrilinear3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func UpsampleTrilinear3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { 
calignCorners = int32(1) } -lib.AtgUpsampleTrilinear3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) UpsampleTrilinear3dOut(out Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - calignCorners := int32(0) - if alignCorners { calignCorners = int32(1) } -lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, scalesH, scalesW) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Values(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgValues(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Var(unbiased bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -lib.AtgVar(ptr, ts.ctensor, cunbiased) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Var1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgVar1(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) VarOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cunbiased := int32(0) - if unbiased { cunbiased = int32(1) } -ckeepdim := int32(0) - if keepdim { ckeepdim = int32(1) } -lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) View(size []int64, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgView(ptr, ts.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ViewAs(other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgViewAs(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Where1(condition Tensor, other Tensor, del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgWhere1(ptr, condition.ctensor, ts.ctensor, other.ctensor) 
- if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) Zero_()(err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZero_(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return err - } - - return err -} - -func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZeros(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func(ts Tensor) ZerosLike(del bool)(retVal Tensor, err error) { -if del { defer ts.MustDrop() } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZerosLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} - -func ZerosOut(out Tensor, size []int64)(retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZerosOut(ptr, out.ctensor, size, len(size)) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = Tensor{ctensor: *ptr} - - return retVal, err -} -// End of implementing Tensor ================================= + lib.Atg__And_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __And1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__And1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Iand_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Iand1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Iand1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Ilshift_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ilshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Ilshift1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ilshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Ior_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Ior1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ior1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Irshift_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Irshift1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Irshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { 
+ return err + } + + return err +} + +func (ts *Tensor) __Ixor_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Ixor1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ixor1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Lshift_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Lshift1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Lshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Or_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Or_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Or1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Or1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Rshift_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Rshift1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Rshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Xor_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) __Xor1(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Xor1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Addr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Addr_(vec1 
*Tensor, vec2 *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Addr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _AmpUpdateScale(growthTracker *Tensor, currentScale *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AmpUpdateScale(ptr, growthTracker.ctensor, currentScale.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_BaddbmmMkl_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _CastByte(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CastChar(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CastDouble(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CastFloat(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CastHalf(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func (ts *Tensor) _CastInt(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CastLong(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CastShort(nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Cat(tensors []Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.Atg_Cat(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.Atg_CatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CholeskyHelper(upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.Atg_CholeskyHelper(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CholeskySolveHelper(a *Tensor, upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Coalesced_(coalesced bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccoalesced := int32(0) + if coalesced { + ccoalesced = int32(1) + } + 
lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func _Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { + ctransposed = int32(1) + } + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + ccudnnEnabled := int32(0) + if cudnnEnabled { + ccudnnEnabled = int32(1) + } + lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { + ctransposed = int32(1) + } + lib.Atg_ConvolutionNogroup(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CopyFrom(dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.Atg_CopyFrom(ptr, ts.ctensor, dst.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { + czeroInfinity = int32(1) + } + lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional 
bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cweightArr []lib.Ctensor + for _, t := range weightArr { + cweightArr = append(cweightArr, t.ctensor) + } + cbatchFirst := int32(0) + if batchFirst { + cbatchFirst = int32(1) + } + cbidirectional := int32(0) + if bidirectional { + cbidirectional = int32(1) + } + lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, numLayers, cbatchFirst, cbidirectional) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Cumprod(dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Cumprod(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CumprodOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CumprodOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Cumsum(dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Cumsum(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _CumsumOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CumsumOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _DimArange(like *Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_DimArange(ptr, like.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { + cscaleGradByFreq = int32(1) + } + csparse := int32(0) + if sparse { + csparse = int32(1) + } + lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag 
*Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { + cscaleGradByFreq = int32(1) + } + lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { + cscaleGradByFreq = int32(1) + } + lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmptyAffineQuantized(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, len(size), scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccomplexInput := int32(0) + if complexInput { + ccomplexInput = int32(1) + } + ccomplexOutput := int32(0) + if complexOutput { + ccomplexOutput = int32(1) + } + cinverse := int32(0) + if inverse { + cinverse = int32(1) + } + cnormalized := int32(0) + if normalized { + cnormalized = int32(1) + } + conesided := int32(0) + if onesided { + conesided = int32(1) + } + lib.Atg_FftWithSize(ptr, ts.ctensor, signalNdim, ccomplexInput, ccomplexOutput, cinverse, 
checkedSignalSizes, len(checkedSignalSizes), cnormalized, conesided, outputSizes, len(outputSizes)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _IndexCopy_(dim int64, index *Tensor, source *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_IndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _IndexPutImpl_(indices []Tensor, values *Tensor, accumulate bool, unsafety bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices { + cindices = append(cindices, t.ctensor) + } + caccumulate := int32(0) + if accumulate { + caccumulate = int32(1) + } + cunsafety := int32(0) + if unsafety { + cunsafety = int32(1) + } + lib.Atg_IndexPutImpl_(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate, cunsafety) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _Indices(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Indices(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _InverseHelper(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_InverseHelper(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { + chalfToFloat = int32(1) + } + lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _LuSolveHelper(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LuSolveHelper(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del 
bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _MaskedScale(mask *Tensor, scale float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _MkldnnReshape(shape []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, len(shape)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func _MultinomialAliasDraw(j *Tensor, q *Tensor, numSamples int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MultinomialAliasDraw(ptr, j.ctensor, q.ctensor, numSamples) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _NnpackSpatialConvolutionBackwardInput(input *Tensor, gradOutput *Tensor, weight *Tensor, padding []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NnpackSpatialConvolutionBackwardInput(ptr, input.ctensor, gradOutput.ctensor, weight.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize []int64, gradOutput *Tensor, padding []int64) (retVal *Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NnpackSpatialConvolutionBackwardWeight(ptr, input.ctensor, weightsize, len(weightsize), gradOutput.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbatchFirst := int32(0) + if batchFirst { + cbatchFirst = int32(1) + } + lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, len(inputSize), batchSizes.ctensor, cbatchFirst) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_PdistBackward(ptr, grad.ctensor, ts.ctensor, p, pdist.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _ReshapeFromTensor(shape *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SWhere(condition *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SWhere(ptr, condition.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SampleDirichlet(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SampleDirichlet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _ShapeAsTensor(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ShapeAsTensor(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _SobolEngineInitializeState_(dimension int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _SobolEngineScramble_(ltm *Tensor, dimension int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) _Softmax(dim 
int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { + chalfToFloat = int32(1) + } + lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseAddmm(sparse *Tensor, dense *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseAddmm(ptr, ts.ctensor, sparse.ctensor, dense.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, len(size), indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseSum(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseSum1(dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum1(ptr, ts.ctensor, 
dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseSum2(dim []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum2(ptr, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseSum3(dim []int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum3(ptr, ts.ctensor, dim, len(dim), dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseSumBackward(grad *Tensor, dim []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSumBackward(ptr, grad.ctensor, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _StandardGamma(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_StandardGamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _StandardGammaGrad(output *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Std(unbiased bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + lib.Atg_Std(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, len(expand1), expand2, len(expand2), expand3, len(expand3), sumdim, len(sumdim), unrollDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _UnsafeView(size []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_UnsafeView(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Values(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Values(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func 
(ts *Tensor) _Var(unbiased bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + lib.Atg_Var(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _WeightNorm(v *Tensor, g *Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Abs(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbs(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Abs_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbs_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AbsOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Acos(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcos(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Acos_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AcosOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveAvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AdaptiveMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Add(other *Tensor, del bool) (retVal *Tensor, err error) { + 
if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Add1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Add_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Add1_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AddOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addbmm_(batch1 *Tensor, batch2 *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addcdiv_(tensor1 *Tensor, tensor2 *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + 
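// Editorial example (not part of the generated patch): a minimal sketch of how the
// pointer-receiver API above is consumed by calling code. It assumes `a` and `b`
// are valid *Tensor values obtained elsewhere; addExample itself is hypothetical.
// The `del` flag controls whether the receiver is dropped (via MustDrop) once the
// call returns, and the in-place Add_ variant returns only an error.
func addExample(a, b *Tensor) (*Tensor, error) {
	// Out-of-place add: del=false keeps `a` alive and returns a new tensor.
	sum, err := a.Add(b, false)
	if err != nil {
		return nil, err
	}
	// In-place add: accumulates `b` into `sum`; no new tensor is allocated.
	if err := sum.Add_(b); err != nil {
		sum.MustDrop()
		return nil, err
	}
	return sum, nil
}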
+func (ts *Tensor) Addcmul(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addcmul_(tensor1 *Tensor, tensor2 *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addmm_(mat1 *Tensor, mat2 *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmm_(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addmv(mat *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addmv_(mat *Tensor, vec *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Addr_(vec1 *Tensor, vec2 *Tensor) (err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func AffineGridGenerator(theta *Tensor, size []int64, alignCorners bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, len(size), calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func AffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, len(size), calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Alias(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAlias(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AlignAs(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) All(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAll(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) All1(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgAll1(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AllOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func AlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain) + 
if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AlphaDropout_(p float64, train bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Angle(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAngle(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AngleOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Any(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAny(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Any1(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgAny1(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AnyOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Arange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Arange1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArange1(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Arange2(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArange2(ptr, start.cscalar, end.cscalar, step.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ArangeOut(out *Tensor, end *Scalar) (retVal *Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeOut(ptr, out.ctensor, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ArangeOut1(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeOut1(ptr, out.ctensor, start.cscalar, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Argmax(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgArgmax(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Argmin(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgArgmin(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Argsort(dim int64, descending bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cdescending := int32(0) + if descending { + cdescending = int32(1) + } + lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AsStrided_(size []int64, stride []int64, storageOffset int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Asin(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Asin_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AsinOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atan(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atan2(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atan2_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Atan2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atan_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AtanOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool2dBackward(ptr, 
gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() 
+ } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + ccountIncludePad := int32(0) + if countIncludePad { + ccountIncludePad = int32(1) + } + lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Baddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Baddbmm_(batch1 *Tensor, batch2 *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgBartlettWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { + ctraining = int32(1) + } + ccudnnEnabled := 
int32(0) + if cudnnEnabled { + ccudnnEnabled = int32(1) + } + lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Bernoulli(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Bernoulli1(p float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli1(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Bernoulli_(p *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Bernoulli1_(p float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli1_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BernoulliOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulliOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Bilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + 
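// Editorial note (not part of the generated patch): every wrapper above lowers Go
// bools (training, periodic, cudnnEnabled, ...) to the int32 flags expected by the
// C API. A hypothetical helper like cBool captures that convention; the generated
// code simply inlines the same three lines at each call site.
func cBool(b bool) int32 {
	if b {
		return 1
	}
	return 0
}
// e.g. the BatchNorm wrapper above could compute ctraining := cBool(training).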
+func (ts *Tensor) BinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BinaryCrossEntropyBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyWithLogitsBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Bincount(weights *Tensor, minlength int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseAnd(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar) + if 
err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseAnd1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseAnd_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BitwiseAnd1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BitwiseAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseAndOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseNot(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNot(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseNot_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNot_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BitwiseNotOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseOr(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseOr1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseOr_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} 
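// Editorial sketch (not part of the generated patch): in the bitwise family above,
// the numeric suffix selects the overload (BitwiseAnd takes *Scalar, BitwiseAnd1
// takes *Tensor), a trailing underscore marks the in-place form, and the *Out
// variants write into a caller-supplied tensor. bitwiseAndInto is hypothetical;
// `out`, `mask`, and `other` are assumed to be valid *Tensor values.
func bitwiseAndInto(out, mask, other *Tensor) error {
	// Computes mask & other into `out`; del=false keeps `mask` alive.
	if _, err := mask.BitwiseAndOut(out, other, false); err != nil {
		return err
	}
	return nil
}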
+ +func (ts *Tensor) BitwiseOr1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BitwiseOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseOrOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseXor(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseXor1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseXor_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BitwiseXor1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) BitwiseXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BitwiseXorOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func BlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgBlackmanWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Bmm(mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BmmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CartesianProd(tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgCartesianProd(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Cat(tensors []Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgCat(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cauchy_(median float64, sigma float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCauchy_(ptr, ts.ctensor, median, sigma) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, computeMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ceil(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeil(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ceil_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeil_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) CeilOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Celu(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Celu_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func ChainMatmul(matrices []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cmatrices []lib.Ctensor + for _, t := range matrices { + cmatrices = append(cmatrices, t.ctensor) + } + lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cholesky(upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.AtgCholesky(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CholeskyInverse(upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CholeskyInverseOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CholeskySolve(input2 *Tensor, upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { + cupper = int32(1) + } + lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + 
retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Clamp(min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Clamp_(min *Scalar, max *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ClampMax(max *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMax(ptr, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ClampMax_(max *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ClampMaxOut(out *Tensor, max *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ClampMin(min *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMin(ptr, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ClampMin_(min *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ClampMinOut(out *Tensor, min *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ClampOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Coalesce(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCoalesce(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Col2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgCol2im(ptr, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Col2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imBackward(ptr, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Col2imBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Col2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Combinations(r int64, withReplacement bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cwithReplacement := int32(0) + if withReplacement { + cwithReplacement = int32(1) + } + lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Conj(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConj(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ConjOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConjOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ConstantPadNd(pad []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConstantPadNd(ptr, ts.ctensor, pad, len(pad)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Contiguous(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgContiguous(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + 
} + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Convolution(input 
*Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { + ctransposed = int32(1) + } + lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { + ctransposed = int32(1) + } + lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CopySparseToSparse_(src *Tensor, nonBlocking bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Cos(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCos(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cos_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) CosOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cosh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cosh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) CoshOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cross(other *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCross(ptr, ts.ctensor, other.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { + czeroInfinity = int32(1) + } + lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, reduction, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CtcLoss1(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { + czeroInfinity = int32(1) + } + lib.AtgCtcLoss1(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + 
cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolution1(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolution1(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolutionTranspose1(weight *Tensor, 
bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolutionTranspose1(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgCudnnConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnGridSampler(grid *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprodOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cumsum(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + 
if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Data(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgData(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Dequantize(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDequantize(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Det(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Detach(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDetach(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Detach_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDetach_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Diag(diagonal int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiag(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) DiagOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Diagflat(offset int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagflat(ptr, ts.ctensor, offset) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts 
*Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Digamma(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Digamma_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigamma_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) DigammaOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Dist(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDist(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Div(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Div1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Div_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Div1_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) DivOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Dot(tensor *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDot(ptr, ts.ctensor, tensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) DotOut(out *Tensor, tensor *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + 
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Dropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Dropout_(p float64, train bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Einsum(equation string, tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgEinsum(ptr, equation, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Elu(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgElu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Elu_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgElu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func EluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EluBackwardOut(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEluBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) EluOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Embedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { + cscaleGradByFreq = int32(1) + } + csparse := int32(0) + if sparse { + csparse = int32(1) + } + lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse) + if err = TorchErr(); err != nil { + return retVal, err + } + 
retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { + cscaleGradByFreq = int32(1) + } + csparse := int32(0) + if sparse { + csparse = int32(1) + } + lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { + cscaleGradByFreq = int32(1) + } + lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) EmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func EmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { + cscaleGradByFreq = int32(1) + } + lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmpty(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) EmptyLike(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmptyOut(out *Tensor, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyStrided(ptr, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Eq(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr 
:= (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Eq1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Eq_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Eq1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) EqOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) EqOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Erf(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Erf_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErf_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ErfOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Erfc(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Erfc_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfc_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ErfcOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Erfinv(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr 
:= (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinv(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Erfinv_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinv_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ErfinvOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Exp(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExp(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Exp_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExp_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ExpOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Expand(size []int64, implicit bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cimplicit := int32(0) + if implicit { + cimplicit = int32(1) + } + lib.AtgExpand(ptr, ts.ctensor, size, len(size), cimplicit) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ExpandAs(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Expm1(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpm1(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Expm1_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpm1_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Expm1Out(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpm1Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Exponential_(lambd float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExponential_(ptr, ts.ctensor, lambd) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) 
(retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Eye1(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEye1(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EyeOut(out *Tensor, n int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEyeOut(ptr, out.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func EyeOut1(out *Tensor, n int64, m int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEyeOut1(ptr, out.ctensor, n, m) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FakeQuantizePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerChannelAffineBackward(ptr, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FakeQuantizePerTensorAffineBackward(grad *Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerTensorAffineBackward(ptr, grad.ctensor, ts.ctensor, scale, zeroPoint, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + 
return retVal, err +} + +func FbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackGemmMatrixFp16(input *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackQuantizedMatrix(input *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackQuantizedMatrix1(input *Tensor, k int64, n int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackQuantizedMatrix1(ptr, input.ctensor, k, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FeatureAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FeatureAlphaDropout_(p float64, train bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func FeatureDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} 
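+
+// NOTE. The generated wrappers in this file all share one shape: reserve a
+// Ctensor pointer, call the matching lib.Atg* binding, check TorchErr(), and
+// wrap the returned handle in a *Tensor. Go bools are passed to C as int32
+// flags (0/1), and the trailing `del` parameter, when true, defers
+// ts.MustDrop() so the receiver tensor is released as soon as the call
+// returns. A minimal usage sketch, assuming `x` is an existing *Tensor
+// (the variable names below are illustrative, not part of the API):
+//
+//	c, err := x.Cos(false) // keep x alive for later use
+//	if err != nil {
+//		// handle err
+//	}
+//	y, err := c.Exp(true) // c is dropped once Exp returns
+//	if err != nil {
+//		// handle err
+//	}
+//	defer y.MustDrop()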
+ +func (ts *Tensor) FeatureDropout_(p float64, train bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { + ctrain = int32(1) + } + lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Fft(signalNdim int64, normalized bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { + cnormalized = int32(1) + } + lib.AtgFft(ptr, ts.ctensor, signalNdim, cnormalized) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Fill_(value *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFill_(ptr, ts.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Fill1_(value *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFill1_(ptr, ts.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) FillDiagonal_(fillValue *Scalar, wrap bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cwrap := int32(0) + if wrap { + cwrap = int32(1) + } + lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Flatten(startDim int64, endDim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Flip(dims []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlip(ptr, ts.ctensor, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Floor(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloor(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Floor_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloor_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) FloorDivide(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FloorDivide1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FloorDivide_(other *Tensor) (err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) FloorDivide1_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) FloorDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FloorOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Fmod(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Fmod1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Fmod_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Fmod1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) FmodOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FmodOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Frac(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrac(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Frac_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrac_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func 
(ts *Tensor) FracOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFracOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FractionalMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FractionalMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FrobeniusNorm(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrobeniusNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FrobeniusNorm1(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgFrobeniusNorm1(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim 
{ + ckeepdim = int32(1) + } + lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cshared := int32(0) + if shared { + cshared = int32(1) + } + lib.AtgFromFile(ptr, filename, cshared, size, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Full(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFull(ptr, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FullLike(fillValue *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func FullOut(out *Tensor, size []int64, fillValue *Scalar) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFullOut(ptr, out.ctensor, size, len(size), fillValue.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Gather(dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { + csparseGrad = int32(1) + } + lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { + csparseGrad = int32(1) + } + lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ge(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ge1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ge_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe_(ptr, 
ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Ge1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) GeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Gelu(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GeluBackward(grad *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeluBackward(ptr, grad.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Geometric_(p float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeometric_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Ger(vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGer(ptr, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GerOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Glu(dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGlu(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GluBackward(gradOutput *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GluBackwardOut(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GluOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Grad(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGrad(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudnnEnabled := int32(0) + if cudnnEnabled { + ccudnnEnabled = int32(1) + } + lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func GruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Gt(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt(ptr, ts.ctensor, other.cscalar) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Gt1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Gt_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Gt1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) GtOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GtOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgHammingWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgHammingWindow2(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgHammingWindow3(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func 
HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgHannWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hardshrink(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrink(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) HardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hardsigmoid(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hardsigmoid_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoid_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) HardsigmoidBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) HardsigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hardtanh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hardtanh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) HardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) HardtanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) HardtanhOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) HingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Histc(bins int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHistc(ptr, ts.ctensor, bins) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) HistcOut(out *Tensor, bins int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Hspmm(mat1 *Tensor, mat2 *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ifft(signalNdim int64, normalized bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { + cnormalized = int32(1) + } + lib.AtgIfft(ptr, ts.ctensor, signalNdim, cnormalized) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Im2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2col(ptr, ts.ctensor, kernelSize, 
len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Im2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2colBackward(ptr, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Im2colBackwardOut(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2colBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Im2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Imag(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgImag(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Index(indices []Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices { + cindices = append(cindices, t.ctensor) + } + lib.AtgIndex(ptr, ts.ctensor, cindices, len(cindices)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IndexAdd(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IndexAdd_(dim int64, index *Tensor, source *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) IndexCopy(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { 
+ return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IndexCopy_(dim int64, index *Tensor, source *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) IndexFill(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IndexFill1(dim int64, index *Tensor, value *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill1(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IndexFill_(dim int64, index *Tensor, value *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) IndexFill1_(dim int64, index *Tensor, value *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill1_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) IndexPut(indices []Tensor, values *Tensor, accumulate bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices { + cindices = append(cindices, t.ctensor) + } + caccumulate := int32(0) + if accumulate { + caccumulate = int32(1) + } + lib.AtgIndexPut(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IndexPut_(indices []Tensor, values *Tensor, accumulate bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices { + cindices = append(cindices, t.ctensor) + } + caccumulate := int32(0) + if accumulate { + caccumulate = int32(1) + } + lib.AtgIndexPut_(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Indices(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndices(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func InstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cuseInputStats := int32(0) + if useInputStats { + cuseInputStats = int32(1) + } + ccudnnEnabled := int32(0) + if cudnnEnabled { + ccudnnEnabled = int32(1) + } + lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IntRepr(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIntRepr(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Inverse(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgInverse(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) InverseOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Irfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { + cnormalized = int32(1) + } + conesided := int32(0) + if onesided { + conesided = int32(1) + } + lib.AtgIrfft(ptr, ts.ctensor, signalNdim, cnormalized, conesided, signalSizes, len(signalSizes)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Isclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cequalNan := int32(0) + if equalNan { + cequalNan = int32(1) + } + lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Isfinite(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsfinite(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Isinf(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsinf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Isnan(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsnan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) KlDiv(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) KlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) L1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) L1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) L1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) L1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudnnEnable := int32(0) + if cudnnEnable { + ccudnnEnable = int32(1) + } + lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps, ccudnnEnable) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Le(other 
*Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Le1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Le_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Le1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LeakyRelu(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyRelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LeakyRelu_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyRelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cselfIsResult := int32(0) + if selfIsResult { + cselfIsResult = int32(1) + } + lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LeakyReluOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lerp(end *Tensor, weight *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return retVal, 
err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lerp1(end *Tensor, weight *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp1(ptr, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lerp_(end *Tensor, weight *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Lerp1_(end *Tensor, weight *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp1_(ptr, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LerpOut(out *Tensor, end *Tensor, weight *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LerpOut1(out *Tensor, end *Tensor, weight *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpOut1(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lgamma(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lgamma_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgamma_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LgammaOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Linear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Linspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinspaceOut(ptr, 
out.ctensor, start.cscalar, end.cscalar, steps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log10(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog10(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log10_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog10_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Log10Out(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log1p(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog1p(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log1p_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog1p_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Log1pOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log2(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog2(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log2_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog2_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Log2Out(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Log_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LogNormal_(mean float64, std float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogNormal_(ptr, ts.ctensor, mean, std) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LogOut(out *Tensor, del bool) (retVal 
*Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogSigmoid(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogSigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoidBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogSigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Logdet(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogdet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalAnd(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalAnd_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LogicalAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalNot(del bool) (retVal *Tensor, err error) { 
+ if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalNot(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalNot_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalNot_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LogicalNotOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalOr(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalOr_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LogicalOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalXor(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogicalXor_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LogicalXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Logspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + 
return retVal, err +} + +func (ts *Tensor) Logsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lt(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lt1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lt_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Lt1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LtOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLtOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LtOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLtOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LuSolve(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func MarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaskedFill(mask *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaskedFill1(mask *Tensor, value *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill1(ptr, ts.ctensor, mask.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaskedFill_(mask *Tensor, value *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) MaskedFill1_(mask *Tensor, value *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill1_(ptr, ts.ctensor, mask.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) MaskedScatter(mask *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaskedScatter_(mask *Tensor, source *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) MaskedSelect(mask *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaskedSelectOut(out *Tensor, mask *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Matmul(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatmul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MatmulOut(out 
*Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MatrixPower(n int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatrixPower(ptr, ts.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MatrixRank(symmetric bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csymmetric := int32(0) + if symmetric { + csymmetric = int32(1) + } + lib.AtgMatrixRank(ptr, ts.ctensor, csymmetric) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MatrixRank1(tol float64, symmetric bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csymmetric := int32(0) + if symmetric { + csymmetric = int32(1) + } + lib.AtgMatrixRank1(ptr, ts.ctensor, tol, csymmetric) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Max(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMax(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Max1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMax1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, 
len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxPool2dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMaxPool2dWithIndicesBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxPool3dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMaxPool3dWithIndicesBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + 
} + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool2d(indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxUnpool3dOut(out *Tensor, indices *Tensor, 
outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaxValues(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgMaxValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mean(dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMean(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mean1(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgMean1(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgMeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Median(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMedian(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Min(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Min1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMin1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MinOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MinValues(dim 
[]int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgMinValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenConvolutionBackwardBias(gradOutput *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMiopenConvolutionBackwardBias(ptr, gradOutput.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MiopenConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + 
lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenDepthwiseConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput 
*Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.AtgMiopenDepthwiseConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbiasDefined := int32(0) + if biasDefined { + cbiasDefined = int32(1) + } + lib.AtgMkldnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbiasDefined) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnLinear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnReorderConv2dWeight(ptr, 
ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mm(mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MseLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MseLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MseLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mul(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mul1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mul_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Mul1_(other *Scalar) (err 
error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) MulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MultiMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MultilabelMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MultilabelMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) 
Multinomial(numSamples int64, replacement bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + creplacement := int32(0) + if replacement { + creplacement = int32(1) + } + lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + creplacement := int32(0) + if replacement { + creplacement = int32(1) + } + lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mv(vec *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMv(ptr, ts.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MvOut(out *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mvlgamma(p int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvlgamma(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Mvlgamma_(p int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvlgamma_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Narrow(dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrow(ptr, ts.ctensor, dim, start, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Narrow1(dim int64, start *Tensor, length int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrow1(ptr, ts.ctensor, dim, start.ctensor, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NarrowCopy(dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NativeNorm(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNativeNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ne(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ne1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ne_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Ne1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) NeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Neg(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeg(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Neg_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeg_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) NegOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewEmpty(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewFull(ptr, ts.ctensor, size, len(size), fillValue.cscalar, 
optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewZeros(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLoss2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Nonzero(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNonzero(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NonzeroOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Norm(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Norm1(p *Scalar, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNorm1(ptr, ts.ctensor, p.cscalar, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Norm2(p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNorm2(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Norm3(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNorm3(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormExceptDim(v *Tensor, pow int64, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NormOut1(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNormOut1(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Normal_(mean float64, std float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormal_(ptr, ts.ctensor, mean, std) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func NormalOut(out *Tensor, mean *Tensor, std float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut(ptr, out.ctensor, mean.ctensor, std) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormalOut1(out *Tensor, mean float64, std *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut1(ptr, out.ctensor, mean, std.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormalOut2(out *Tensor, mean *Tensor, std *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut2(ptr, out.ctensor, mean.ctensor, std.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormalOut3(out *Tensor, mean float64, std float64, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut3(ptr, out.ctensor, mean, std, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NuclearNorm(keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NuclearNorm1(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNuclearNorm1(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + 
return retVal, err +} + +func (ts *Tensor) NuclearNormOut(out *Tensor, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NuclearNormOut1(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNuclearNormOut1(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NumpyT(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNumpyT(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) OneHot(numClasses int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOneHot(ptr, ts.ctensor, numClasses) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOnes(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) OnesLike(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOnesLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func OnesOut(out *Tensor, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOnesOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Orgqr(input2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) OrgqrOut(out *Tensor, input2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Ormqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cleft := int32(0) + if left { + 
cleft = int32(1) + } + ctranspose := int32(0) + if transpose { + ctranspose = int32(1) + } + lib.AtgOrmqr(ptr, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) OrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cleft := int32(0) + if left { + cleft = int32(1) + } + ctranspose := int32(0) + if transpose { + ctranspose = int32(1) + } + lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func PairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Pdist(p float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPdist(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Permute(dims []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPermute(ptr, ts.ctensor, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) PinMemory(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPinMemory(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Pinverse(rcond float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPinverse(ptr, ts.ctensor, rcond) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) PixelShuffle(upscaleFactor int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Poisson(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPoisson(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func PoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + clogInput := int32(0) + if logInput { + 
clogInput = int32(1) + } + cfull := int32(0) + if full { + cfull = int32(1) + } + lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Polygamma(n int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolygamma(ptr, n, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Polygamma_(n int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolygamma_(ptr, ts.ctensor, n) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) PolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Pow(exponent *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow(ptr, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Pow1(exponent *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow1(ptr, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Pow2(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow2(ptr, selfScalar.cscalar, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Pow_(exponent *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Pow1_(exponent *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow1_(ptr, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) PowOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPowOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) PowOut1(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPowOut1(ptr, out.ctensor, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func PowOut2(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPowOut2(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Prelu(weight *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Prod(dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgProd(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Prod1(dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgProd1(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ProdOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgProdOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Put_(index *Tensor, source *Tensor, accumulate bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + caccumulate := int32(0) + if accumulate { + caccumulate = int32(1) + } + lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) QPerChannelScales(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQPerChannelScales(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) QPerChannelZeroPoints(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) QuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, 
err +} + +func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRand(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RandLike(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandOut(out *Tensor, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandint(ptr, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Randint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandint1(ptr, low, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RandintLike(high int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintLike(ptr, ts.ctensor, high) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RandintLike1(low int64, high int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintLike1(ptr, ts.ctensor, low, high) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandintOut(out *Tensor, high int64, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintOut(ptr, out.ctensor, high, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandintOut1(out *Tensor, low int64, high int64, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintOut1(ptr, out.ctensor, low, high, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandn(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RandnLike(del 
bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandnLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandnOut(out *Tensor, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandnOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Random_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandom_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Random1_(to int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandom1_(ptr, ts.ctensor, to) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Random2(from int64, to int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandom2(ptr, ts.ctensor, from, to) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandpermOut(out *Tensor, n int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandpermOut(ptr, out.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Range(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRange(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Range1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRange1(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RangeOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Real(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReal(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Reciprocal(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReciprocal(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Reciprocal_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReciprocal_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ReciprocalOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad1d(padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad2d(padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReflectionPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Relu(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Relu_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Remainder(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Remainder1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Remainder_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Remainder1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) RemainderOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainderOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RemainderOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainderOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Renorm(p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Renorm_(p *Scalar, dim int64, maxnorm *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) + if err = TorchErr(); err != nil { + return err + } 
+ + return err +} + +func (ts *Tensor) RenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Repeat(repeats []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeat(ptr, ts.ctensor, repeats, len(repeats)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RepeatInterleave(repeats *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeatInterleave(ptr, repeats.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RepeatInterleave1(repeats *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeatInterleave1(ptr, ts.ctensor, repeats.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RepeatInterleave2(repeats int64, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeatInterleave2(ptr, ts.ctensor, repeats, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad1d(padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) 
ReplicationPad2d(padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad3d(padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReplicationPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RequiresGrad_(requiresGrad bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + crequiresGrad := int32(0) + if requiresGrad { + crequiresGrad = int32(1) + } + 
lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Reshape(shape []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReshape(ptr, ts.ctensor, shape, len(shape)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ReshapeAs(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Resize_(size []int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResize_(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ResizeAs_(theTemplate *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Rfft(signalNdim int64, normalized bool, onesided bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { + cnormalized = int32(1) + } + conesided := int32(0) + if onesided { + conesided = int32(1) + } + lib.AtgRfft(ptr, ts.ctensor, signalNdim, cnormalized, conesided) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func RnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Roll(shifts []int64, dims []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoll(ptr, ts.ctensor, shifts, len(shifts), dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rot90(k int64, dims []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRot90(ptr, ts.ctensor, k, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Round(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + 
+ lib.AtgRound(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Round_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRound_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) RoundOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rrelu(training bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { + ctraining = int32(1) + } + lib.AtgRrelu(ptr, ts.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rrelu_(training bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { + ctraining = int32(1) + } + lib.AtgRrelu_(ptr, ts.ctensor, ctraining) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) RreluWithNoise(noise *Tensor, training bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { + ctraining = int32(1) + } + lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RreluWithNoise_(noise *Tensor, training bool) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { + ctraining = int32(1) + } + lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) RreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { + ctraining = int32(1) + } + cselfIsResult := int32(0) + if selfIsResult { + cselfIsResult = int32(1) + } + lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) RreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { + ctraining = int32(1) + } + lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rsqrt(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrt(ptr, ts.ctensor) + 
if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rsqrt_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrt_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) RsqrtOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rsub(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsub(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rsub1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsub1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Scatter(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Scatter1(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter1(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Scatter_(dim int64, index *Tensor, src *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Scatter1_(dim int64, index *Tensor, value *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter1_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ScatterAdd(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ScatterAdd_(dim int64, index *Tensor, src *Tensor) (err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Select(dim int64, index int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelect(ptr, ts.ctensor, dim, index) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Selu(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Selu_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Set_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSet_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Set1_(source *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSet1_(ptr, ts.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SetRequiresGrad(r bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cr := int32(0) + if r { + cr = int32(1) + } + lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sigmoid(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sigmoid_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoid_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func SigmoidBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sign(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + 
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSign(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sign_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSign_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SignOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSignOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sin(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sin_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SinOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sinh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sinh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SinhOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Slice(dim int64, start int64, end int64, step int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlice(ptr, ts.ctensor, dim, start, end, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del 
bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Smm(mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SmoothL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + 
defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Softmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Softplus(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplus(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftplusBackwardOut(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftplusOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Softshrink(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrink(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftshrinkBackwardOut(gradInput *Tensor, 
gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SoftshrinkOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensor1(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensor1(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensor2(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensor2(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SparseMask(mask *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseResize_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Sqrt(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrt(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sqrt_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrt_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SqrtOut(out *Tensor, del bool) (retVal *Tensor, 
err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Square(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSquare(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Square_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSquare_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Squeeze(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Squeeze1(dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze1(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Squeeze_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Squeeze1_(dim int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze1_(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Sspaddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Stack(tensors []Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgStack(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func StackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Std(unbiased bool, del bool) (retVal 
*Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + lib.AtgStd(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Std1(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgStd1(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) StdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Stft(nFft int64, hopLength int64, winLength int64, window *Tensor, normalized bool, onesided bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { + cnormalized = int32(1) + } + conesided := int32(0) + if onesided { + conesided = int32(1) + } + lib.AtgStft(ptr, ts.ctensor, nFft, hopLength, winLength, window.ctensor, cnormalized, conesided) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sub(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sub1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sub_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Sub1_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SubOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) 
Sum(dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSum(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sum1(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgSum1(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgSumOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SumToSize(size []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSumToSize(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) T(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgT(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) T_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgT_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Take(index *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTake(ptr, ts.ctensor, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TakeOut(out *Tensor, index *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Tan(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Tan_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) TanOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanOut(ptr, out.ctensor, 
ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Tanh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Tanh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func TanhBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func TanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TanhOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Tensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Threshold(threshold *Scalar, value *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Threshold_(threshold *Scalar, value *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, 
err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) To(device gotch.Device, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTo(ptr, ts.ctensor, device.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) To1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + ccopy := int32(0) + if copy { + ccopy = int32(1) + } + lib.AtgTo1(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) To2(dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + ccopy := int32(0) + if copy { + ccopy = int32(1) + } + lib.AtgTo2(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) To3(other *Tensor, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + ccopy := int32(0) + if copy { + ccopy = int32(1) + } + lib.AtgTo3(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) To4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { + cnonBlocking = int32(1) + } + ccopy := int32(0) + if copy { + ccopy = int32(1) + } + lib.AtgTo4(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ToDense(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToDense(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ToDenseBackward(grad *Tensor, input *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ToMkldnn(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToMkldnn(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func 
ToMkldnnBackward(grad *Tensor, input *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ToSparse(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToSparse(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ToSparse1(sparseDim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToSparse1(ptr, ts.ctensor, sparseDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Totype(scalarType gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Trace(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrace(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Transpose(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Transpose_(dim0 int64, dim1 int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Trapz(y *Tensor, x *Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Trapz1(y *Tensor, dx float64, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapz1(ptr, y.ctensor, dx, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Tril(diagonal int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTril(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Tril_(diagonal int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTril_(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TrilOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func TripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cswap := int32(0) + if swap { + cswap = int32(1) + } + lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Triu(diagonal int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriu(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Triu_(diagonal int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriu_(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TriuOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TrueDivide(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TrueDivide1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivide1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TrueDivide_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) TrueDivide1_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgTrueDivide1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) TrueDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Trunc(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrunc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Trunc_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrunc_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) TruncOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) TypeAs(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Uniform_(from float64, to float64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUniform_(ptr, ts.ctensor, from, to) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Unsqueeze(dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnsqueeze(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Unsqueeze_(dim int64) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnsqueeze_(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, 
scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBicubic2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBicubic2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBilinear2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBilinear2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW 
float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleLinear1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleLinear1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleNearest1d(outputSize []int64, scales float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, len(outputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func 
UpsampleNearest1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, 
outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleTrilinear3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleTrilinear3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, 
scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Values(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgValues(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Var(unbiased bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + lib.AtgVar(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Var1(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgVar1(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) VarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { + cunbiased = int32(1) + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) View(size []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgView(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ViewAs(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgViewAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Where1(condition *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgWhere1(ptr, condition.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Zero_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZero_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZeros(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err 
+} + +func (ts *Tensor) ZerosLike(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZerosLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ZerosOut(out *Tensor, size []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZerosOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +// End of implementing Tensor ================================= diff --git a/tensor/tensor.go b/tensor/tensor.go index 96cfcae..228c020 100644 --- a/tensor/tensor.go +++ b/tensor/tensor.go @@ -27,17 +27,17 @@ type Tensor struct { var None = NewTensor() // NewTensor creates a new tensor -func NewTensor() Tensor { +func NewTensor() *Tensor { ctensor := lib.AtNewTensor() - return Tensor{ctensor} + return &Tensor{ctensor} } -func (ts Tensor) Dim() uint64 { - retVal := lib.AtDim(ts.ctensor) +func (ts *Tensor) Dim() uint64 { + dim := lib.AtDim(ts.ctensor) if err := TorchErr(); err != nil { log.Fatal(err) } - return retVal + return dim } // Size return shape of the tensor @@ -45,89 +45,89 @@ func (ts Tensor) Dim() uint64 { // NOTE: C++ libtorch calls at_shape() -> t.sizes() // And returns a slice of sizes or shape using given pointer // to that slice. -func (ts Tensor) Size() (retVal []int64, err error) { +func (ts *Tensor) Size() ([]int64, error) { dim := lib.AtDim(ts.ctensor) sz := make([]int64, dim) szPtr, err := DataAsPtr(sz) if err != nil { - return retVal, err + return nil, err } defer C.free(unsafe.Pointer(szPtr)) lib.AtShape(ts.ctensor, szPtr) if err = TorchErr(); err != nil { - return retVal, err + return nil, err } - retVal = decodeSize(szPtr, dim) + shape := decodeSize(szPtr, dim) - return retVal, nil + return shape, nil } -func (ts Tensor) MustSize() (retVal []int64) { - retVal, err := ts.Size() +func (ts *Tensor) MustSize() []int64 { + shape, err := ts.Size() if err != nil { log.Fatal(err) } - return retVal + return shape } // Size1 returns the tensor size for 1D tensors. -func (ts Tensor) Size1() (retVal int64, err error) { +func (ts *Tensor) Size1() (int64, error) { shape, err := ts.Size() if err != nil { - return retVal, err + return 0, err } if len(shape) != 1 { err = fmt.Errorf("Expected one dim, got %v\n", len(shape)) - return retVal, err + return 0, err } return shape[0], nil } // Size2 returns the tensor size for 2D tensors. -func (ts Tensor) Size2() (retVal []int64, err error) { +func (ts *Tensor) Size2() ([]int64, error) { shape, err := ts.Size() if err != nil { - return retVal, err + return nil, err } if len(shape) != 2 { err = fmt.Errorf("Expected two dims, got %v\n", len(shape)) - return retVal, err + return nil, err } return shape, nil } // Size3 returns the tensor size for 3D tensors. -func (ts Tensor) Size3() (retVal []int64, err error) { +func (ts *Tensor) Size3() ([]int64, error) { shape, err := ts.Size() if err != nil { - return retVal, err + return nil, err } if len(shape) != 3 { err = fmt.Errorf("Expected three dims, got %v\n", len(shape)) - return retVal, err + return nil, err } return shape, nil } // Size4 returns the tensor size for 4D tensors. 
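// The pointer-receiver API above can be exercised with a short, hypothetical
// sketch like the one below. It assumes the tensor/ directory builds as
// package tensor importable at "github.com/sugarme/gotch/tensor"; adjust the
// import path if the actual module layout differs.
package main

import (
	"fmt"

	"github.com/sugarme/gotch/tensor"
)

func main() {
	// OfSlice builds a 1-D tensor from a Go slice and, after this patch,
	// returns *Tensor instead of Tensor.
	ts, err := tensor.OfSlice([]float64{1.3, 29.7})
	if err != nil {
		panic(err)
	}
	defer ts.MustDrop()

	fmt.Println(ts.MustSize()) // [2]

	// Size1 succeeds only for 1-D tensors and returns the single dimension.
	n, err := ts.Size1()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 2
}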
-func (ts Tensor) Size4() (retVal []int64, err error) { +func (ts *Tensor) Size4() ([]int64, error) { shape, err := ts.Size() if err != nil { - return retVal, err + return nil, err } if len(shape) != 4 { err = fmt.Errorf("Expected four dims, got %v\n", len(shape)) - return retVal, err + return nil, err } return shape, nil @@ -154,16 +154,16 @@ func decodeSize(ptr unsafe.Pointer, nsize uint64) []int64 { } // OfSlice creates tensor from a slice data -func OfSlice(data interface{}) (retVal Tensor, err error) { +func OfSlice(data interface{}) (*Tensor, error) { typ, dataLen, err := DataCheck(data) if err != nil { - return retVal, err + return nil, err } dtype, err := gotch.ToDType(typ) if err != nil { - return retVal, err + return nil, err } shape := []int64{int64(dataLen)} @@ -171,7 +171,7 @@ func OfSlice(data interface{}) (retVal Tensor, err error) { eltSizeInBytes, err := gotch.DTypeSize(dtype) if err != nil { - return retVal, err + return nil, err } nbytes := int(eltSizeInBytes) * int(elementNum) @@ -180,49 +180,46 @@ func OfSlice(data interface{}) (retVal Tensor, err error) { defer C.free(unsafe.Pointer(dataPtr)) if err = EncodeTensor(buff, reflect.ValueOf(data), shape); err != nil { - return retVal, err + return nil, err } cint, err := gotch.DType2CInt(dtype) if err != nil { - return retVal, err + return nil, err } ctensor := lib.AtTensorOfData(dataPtr, shape, uint(len(shape)), uint(eltSizeInBytes), int(cint)) if err = TorchErr(); err != nil { - return retVal, err + return nil, err } - retVal = Tensor{ctensor} - - return retVal, nil + return &Tensor{ctensor}, nil } // MustOfSlice create a tensor from slice of data. It will be panic if error. -func MustOfSlice(data interface{}) (retVal Tensor) { - retVal, err := OfSlice(data) +func MustOfSlice(data interface{}) *Tensor { + ts, err := OfSlice(data) if err != nil { log.Fatal(err) } - return retVal - + return ts } // TensorFrom create a tensor from slice of data. It will be panic if error. -func TensorFrom(data interface{}) (retVal Tensor) { - retVal, err := OfSlice(data) +func TensorFrom(data interface{}) *Tensor { + ts, err := OfSlice(data) if err != nil { log.Fatal(err) } - return retVal + return ts } // Print prints tensor values to console. // // NOTE: it is printed from C and will print ALL elements of tensor // with no truncation at all. -func (ts Tensor) Print() { +func (ts *Tensor) Print() { lib.AtPrint(ts.ctensor) if err := TorchErr(); err != nil { log.Fatal(err) @@ -230,56 +227,53 @@ func (ts Tensor) Print() { } // NewTensorFromData creates tensor from given data and shape -func NewTensorFromData(data interface{}, shape []int64) (retVal Tensor, err error) { +func NewTensorFromData(data interface{}, shape []int64) (*Tensor, error) { // 1. Check whether data and shape match elementNum, err := DataDim(data) if err != nil { - return retVal, err + return nil, err } nflattend := FlattenDim(shape) if elementNum != nflattend { err = fmt.Errorf("Number of data elements (%v) and flatten shape (%v) dimension mismatched.\n", elementNum, nflattend) - return retVal, err + return nil, err } // 2. Write raw data to C memory and get C pointer dataPtr, err := DataAsPtr(data) defer C.free(unsafe.Pointer(dataPtr)) if err != nil { - return retVal, err + return nil, err } // 3. 
Create tensor with pointer and shape dtype, err := gotch.DTypeFromData(data) if err != nil { - return retVal, err + return nil, err } eltSizeInBytes, err := gotch.DTypeSize(dtype) if err != nil { - return retVal, err + return nil, err } cint, err := gotch.DType2CInt(dtype) if err != nil { - return retVal, err + return nil, err } ctensor := lib.AtTensorOfData(dataPtr, shape, uint(len(shape)), uint(eltSizeInBytes), int(cint)) // defer C.free(unsafe.Pointer(ctensor)) if err = TorchErr(); err != nil { - return retVal, err + return nil, err } - retVal = Tensor{ctensor} - - return retVal, nil - + return &Tensor{ctensor}, nil } -func (ts Tensor) DType() gotch.DType { +func (ts *Tensor) DType() gotch.DType { cint := lib.AtScalarType(ts.ctensor) dtype, err := gotch.CInt2DType(cint) @@ -290,7 +284,11 @@ func (ts Tensor) DType() gotch.DType { return dtype } -func (ts Tensor) Device() (retVal gotch.Device, err error) { +func (ts *Tensor) Device() (gotch.Device, error) { + var ( + retVal gotch.Device + err error + ) cInt := lib.AtDevice(ts.ctensor) if err = TorchErr(); err != nil { @@ -302,13 +300,13 @@ func (ts Tensor) Device() (retVal gotch.Device, err error) { return device.OfCInt(int32(cInt)), nil } -func (ts Tensor) MustDevice() (retVal gotch.Device) { - retVal, err := ts.Device() +func (ts *Tensor) MustDevice() gotch.Device { + device, err := ts.Device() if err != nil { log.Fatal(err) } - return retVal + return device } /* @@ -342,121 +340,125 @@ func (ts Tensor) MustDevice() (retVal gotch.Device) { // Float64Value returns a float value on tensors holding a single element. // An error is returned otherwise. // double at_double_value_at_indexes(tensor, int64_t *indexes, int indexes_len); -func (ts Tensor) Float64Value(idx []int64) (retVal float64, err error) { +func (ts *Tensor) Float64Value(idx []int64) (float64, error) { idxPtr, err := DataAsPtr(idx) if err != nil { - return retVal, err + return 0, err } defer C.free(unsafe.Pointer(idxPtr)) - retVal = lib.AtDoubleValueAtIndexes(ts.ctensor, idxPtr, len(idx)) + f64Val := lib.AtDoubleValueAtIndexes(ts.ctensor, idxPtr, len(idx)) if err = TorchErr(); err != nil { - return retVal, err + return 0, err } - return retVal, err + return f64Val, err } -func (ts Tensor) MustFloat64Value(idx []int64) (retVal float64) { - retVal, err := ts.Float64Value(idx) +func (ts *Tensor) MustFloat64Value(idx []int64) float64 { + f64Val, err := ts.Float64Value(idx) if err != nil { log.Fatal(err) } - return retVal + return f64Val } // Int64Value returns an int value on tensors holding a single element. An error is // returned otherwise. -func (ts Tensor) Int64Value(idx []int64) (retVal int64, err error) { +func (ts *Tensor) Int64Value(idx []int64) (int64, error) { + var ( + retVal int64 + err error + ) idxPtr, err := DataAsPtr(idx) if err != nil { return retVal, err } defer C.free(unsafe.Pointer(idxPtr)) - retVal = lib.AtInt64ValueAtIndexes(ts.ctensor, idxPtr, len(idx)) + int64Val := lib.AtInt64ValueAtIndexes(ts.ctensor, idxPtr, len(idx)) if err = TorchErr(); err != nil { - return retVal, err + return 0, err } - return retVal, err + return int64Val, err } -func (ts Tensor) MustInt64Value(idx []int64) (retVal int64) { - retVal, err := ts.Int64Value(idx) +func (ts *Tensor) MustInt64Value(idx []int64) int64 { + int64Val, err := ts.Int64Value(idx) if err != nil { log.Fatal(err) } - return retVal + return int64Val } // RequiresGrad returns true if gradient are currently tracked for this tensor. 
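// A small, hypothetical sketch of the single-element accessors shown above:
// Float64Value takes one index per tensor dimension. The import path
// "github.com/sugarme/gotch/tensor" is assumed, as in the earlier sketch.
package main

import (
	"fmt"

	"github.com/sugarme/gotch/tensor"
)

func main() {
	ts := tensor.MustOfSlice([]float64{1.0, 2.0, 3.0})
	defer ts.MustDrop()

	// Read the element at index 2 of this 1-D tensor.
	v, err := ts.Float64Value([]int64{2})
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 3

	fmt.Println(ts.DType())      // element kind reported by libtorch
	fmt.Println(ts.MustDevice()) // expected to be CPU for data copied from Go
}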
-func (ts Tensor) RequiresGrad() (retVal bool, err error) { - retVal = lib.AtRequiresGrad(ts.ctensor) +func (ts *Tensor) RequiresGrad() (bool, error) { + state := lib.AtRequiresGrad(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return false, err } - return retVal, nil + return state, nil } -func (ts Tensor) MustRequiresGrad() (retVal bool) { - retVal, err := ts.RequiresGrad() +func (ts *Tensor) MustRequiresGrad() bool { + state, err := ts.RequiresGrad() if err != nil { log.Fatal(err) } - return retVal + return state } // DataPtr returns the address of the first element of this tensor. -func (ts Tensor) DataPtr() (retVal unsafe.Pointer, err error) { +func (ts *Tensor) DataPtr() (unsafe.Pointer, error) { - retVal = lib.AtDataPtr(ts.ctensor) + datPtr := lib.AtDataPtr(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } - return retVal, nil + return datPtr, nil } // Defined returns true is the tensor is defined. -func (ts Tensor) Defined() (retVal bool, err error) { - retVal = lib.AtDefined(ts.ctensor) +func (ts *Tensor) Defined() (bool, error) { + state := lib.AtDefined(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return false, err } - return retVal, nil + return state, nil } -func (ts Tensor) MustDefined() (retVal bool) { - retVal, err := ts.Defined() +func (ts *Tensor) MustDefined() bool { + state, err := ts.Defined() if err != nil { log.Fatal(err) } - return retVal + return state } // IsSparse returns true is the tensor is spare. -func (ts Tensor) IsSparse() (retVal bool, err error) { - retVal = lib.AtIsSparse(ts.ctensor) +func (ts *Tensor) IsSparse() (bool, error) { + state := lib.AtIsSparse(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return false, err } - return retVal, nil + return state, nil } // ZeroGrad zeroes the gradient tensor attached to this tensor if defined. -func (ts Tensor) ZeroGrad() { +func (ts *Tensor) ZeroGrad() { grad := ts.MustGrad(false) if grad.MustDefined() { grad.Detach_() @@ -468,26 +470,38 @@ func (ts Tensor) ZeroGrad() { // which gradients are tracked. // // Gradients tracking can be turned on via `SetRequiresGrad`. -func (ts Tensor) Backward() (err error) { +func (ts *Tensor) Backward() error { lib.AtBackward(ts.ctensor, 0, 0) - if err = TorchErr(); err != nil { + if err := TorchErr(); err != nil { return err } return nil } -func (ts Tensor) MustBackward() { +func (ts *Tensor) MustBackward() { if err := ts.Backward(); err != nil { log.Fatal(err) } } // RunBackward runs the backward ... -func RunBackward(tensors []Tensor, inputs []Tensor, keepGraphB bool, createGraphB bool) (retVal []Tensor, err error) { +func RunBackward(tensors []Tensor, inputs []Tensor, keepGraphB bool, createGraphB bool) ([]Tensor, error) { // NOTE: outputs is a slice of tensors with length = len(inputs) var outputsPtr []*lib.Ctensor - // TODO: Are they allocated continouslly??? + // Are they allocated contigously??? Definitely not. + // TODO. calculate C memory size = C pointer size x n pointers + // Then C.malloc such calculated amount + // NOTE. replace with the following code and test. 
+ /* + * ntensors := len(inputs) + * nbytes := C.size_t(ntensors) * C.size_t(unsafe.Sizeof(uintptr(0))) + * ctensorsPtr := (*[1 << 30]lib.Ctensor)(C.malloc(nbytes)) + * for i :=0; i < ntensors; i++ { + * outputPtr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + * outputsPtr[i] = outputPtr + * } + * */ for i := 0; i < len(inputs); i++ { outputPtr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) defer C.free(unsafe.Pointer(outputPtr)) @@ -509,27 +523,28 @@ func RunBackward(tensors []Tensor, inputs []Tensor, keepGraphB bool, createGraph } lib.AtRunBackward(tensorsPtr, len(tensors), inputsPtr, len(inputs), outputsPtr[0], keepGraph, createGraph) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } + var oTensors []Tensor for i := 0; i < len(inputs); i++ { outputPtr := outputsPtr[i] - retVal = append(retVal, Tensor{ctensor: *outputPtr}) + oTensors = append(oTensors, Tensor{ctensor: *outputPtr}) } - return retVal, nil + return oTensors, nil } // CopyDataUint8 copies `numel` elements from `self` to `dst`. // // NOTE: `dst` located in Go memory. Should it be? -func (ts Tensor) CopyDataUint8(dst []uint8, numel uint) (err error) { +func (ts *Tensor) CopyDataUint8(dst []uint8, numel uint) error { // NOTE: we must make sure that `dst` has same len as `numel`. Otherwise, // there will be memory leak and or out of range error. if len(dst) < int(numel) { - err = fmt.Errorf("CopyDataUint8 Error: length of destination slice data (%v) is smaller than \nnumber of elements to be copied (%v)", len(dst), numel) + err := fmt.Errorf("CopyDataUint8 Error: length of destination slice data (%v) is smaller than \nnumber of elements to be copied (%v)", len(dst), numel) return err } @@ -546,7 +561,7 @@ func (ts Tensor) CopyDataUint8(dst []uint8, numel uint) (err error) { return nil } -func (ts Tensor) MustCopyDataUint8(dst []uint8, numel uint) { +func (ts *Tensor) MustCopyDataUint8(dst []uint8, numel uint) { err := ts.CopyDataUint8(dst, numel) if err != nil { log.Fatal(err) @@ -560,7 +575,7 @@ func (ts Tensor) MustCopyDataUint8(dst []uint8, numel uint) { // We will render Go pointer of first element of `dst` slice // and number of elements to C land. This may break in the future // if Go policy changes. -func (ts Tensor) CopyData(dst interface{}, numel uint) (err error) { +func (ts *Tensor) CopyData(dst interface{}, numel uint) error { gotype, dlen, err := DataCheck(dst) if err != nil { @@ -621,7 +636,7 @@ func (ts Tensor) CopyData(dst interface{}, numel uint) (err error) { // // NOTE: `dst` is a slice with length = numel and Go type equavalent to tensor // DType -func (ts Tensor) MustCopyData(dst interface{}, numel uint) { +func (ts *Tensor) MustCopyData(dst interface{}, numel uint) { err := ts.CopyData(dst, numel) if err != nil { log.Fatal(err) @@ -629,85 +644,80 @@ func (ts Tensor) MustCopyData(dst interface{}, numel uint) { } // Numel returns the total number of elements stored in a tensor. -func (ts Tensor) Numel() (retVal uint) { - var shape []int64 - shape = ts.MustSize() +func (ts *Tensor) Numel() uint { + shape := ts.MustSize() return uint(FlattenDim(shape)) } // ShallowClone returns a new tensor that share storage with the input tensor. 
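// A hypothetical sketch of Numel/CopyData above: the destination must be a Go
// slice of the tensor's element type with at least Numel elements, because
// the copy itself happens in C land. Import path assumed as in the earlier
// sketches.
package main

import (
	"fmt"

	"github.com/sugarme/gotch/tensor"
)

func main() {
	ts := tensor.MustOfSlice([]float64{0.5, 1.5, 2.5})
	defer ts.MustDrop()

	n := ts.Numel()
	dst := make([]float64, n) // same element type as the tensor, len >= numel
	ts.MustCopyData(dst, n)

	fmt.Println(dst) // [0.5 1.5 2.5]
}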
-func (ts Tensor) ShallowClone() (retVal Tensor, err error) { +func (ts *Tensor) ShallowClone() (*Tensor, error) { ctensor := lib.AtShallowClone(ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } - retVal = Tensor{ctensor} - - return retVal, nil + return &Tensor{ctensor}, nil } // MustShallowClone returns a new tensor that share storage with the input // tensor. It will panic if error occurred -func (ts Tensor) MustShallowClone() (retVal Tensor) { - retVal, err := ts.ShallowClone() +func (ts *Tensor) MustShallowClone() *Tensor { + newTs, err := ts.ShallowClone() if err != nil { log.Fatal(err) } - return retVal + return newTs } // Get gets the sub-tensor at the given index. -func (ts Tensor) Get(index int) (retVal Tensor, err error) { +func (ts *Tensor) Get(index int) (*Tensor, error) { ctensor := lib.AtGet(ts.ctensor, index) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } - retVal = Tensor{ctensor} - return retVal, nil + return &Tensor{ctensor}, nil } // MustGet gets the sub-tensor at the given index. It will panic if error // occurred. -func (ts Tensor) MustGet(index int) (retVal Tensor) { - retVal, err := ts.Get(index) +func (ts *Tensor) MustGet(index int) *Tensor { + + subTs, err := ts.Get(index) if err != nil { log.Fatal(err) } - return retVal + + return subTs } // Copy_ copies in-place values from the argument tensor to the input tensor. -func Copy_(self, src Tensor) { - var err error - lib.AtCopy_(self.ctensor, src.ctensor) +func Copy_(self, src *Tensor) { - if err = TorchErr(); err != nil { + lib.AtCopy_(self.ctensor, src.ctensor) + if err := TorchErr(); err != nil { log.Fatal(err) } - } // Copy_ copies in-place values from the argument tensor to existing tensor -func (ts Tensor) Copy_(src Tensor) { - var err error - lib.AtCopy_(ts.ctensor, src.ctensor) +func (ts *Tensor) Copy_(src *Tensor) { - if err = TorchErr(); err != nil { + lib.AtCopy_(ts.ctensor, src.ctensor) + if err := TorchErr(); err != nil { log.Fatal(err) } } // Save saves a tensor to a file. -func (ts Tensor) Save(path string) (err error) { - lib.AtSave(ts.ctensor, path) +func (ts *Tensor) Save(path string) error { - if err = TorchErr(); err != nil { + lib.AtSave(ts.ctensor, path) + if err := TorchErr(); err != nil { return err } @@ -715,44 +725,43 @@ func (ts Tensor) Save(path string) (err error) { } // MustSave saves a tensor to a file. It will panic if error -func (ts Tensor) MustSave(path string) { +func (ts *Tensor) MustSave(path string) { if err := ts.Save(path); err != nil { log.Fatal(err) } } // Load loads a tensor from a file. -func Load(path string) (retVal Tensor, err error) { - ctensor := lib.AtLoad(path) +func Load(path string) (*Tensor, error) { - if err = TorchErr(); err != nil { - return retVal, err + ctensor := lib.AtLoad(path) + if err := TorchErr(); err != nil { + return nil, err } - retVal = Tensor{ctensor} - - return retVal, nil + return &Tensor{ctensor}, nil } // MustLoad loads a tensor to a file. It will panic if error -func MustLoad(path string) (retVal Tensor) { - retVal, err := Load(path) +func MustLoad(path string) *Tensor { + ts, err := Load(path) if err != nil { log.Fatal(err) } - return retVal + return ts } +// NamedTensor wraps C tensor and its name type NamedTensor struct { Name string - Tensor Tensor + Tensor *Tensor } // SaveMulti saves some named tensors to a file // // The file format is the same as the one used by the PyTorch C++ API. 
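A usage sketch for the pointer-receiver save/load APIs converted in this hunk. File paths are placeholders and the import paths are assumed; the signatures themselves are taken from the diff above.

package main

import (
	"fmt"
	"log"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	x := tensor.MustZeros([]int64{3}, gotch.Float, gotch.CPU)

	// Single-tensor round trip.
	if err := x.Save("/tmp/x.gt"); err != nil {
		log.Fatal(err)
	}
	y, err := tensor.Load("/tmp/x.gt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(y.MustSize()) // [3]

	// NamedTensor now carries a *Tensor field.
	named := []tensor.NamedTensor{{Name: "weight", Tensor: x}}
	if err := tensor.SaveMulti(named, "/tmp/multi.gt"); err != nil {
		log.Fatal(err)
	}
}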
-func SaveMulti(namedTensors []NamedTensor, path string) (err error) { +func SaveMulti(namedTensors []NamedTensor, path string) error { var ctensors []lib.Ctensor var names []string @@ -762,7 +771,7 @@ func SaveMulti(namedTensors []NamedTensor, path string) (err error) { } lib.AtSaveMulti(ctensors, names, len(namedTensors), path) - if err = TorchErr(); err != nil { + if err := TorchErr(); err != nil { return err } @@ -780,69 +789,71 @@ func MustSaveMulti(namedTensors []NamedTensor, path string) { // LoadMulti loads some named tensors from a file // // The file format is the same as the one used by the PyTorch C++ API. -func LoadMulti(path string) (retVal []NamedTensor, err error) { +func LoadMulti(path string) ([]NamedTensor, error) { var data lib.LoadData dataPtr := lib.PStore.Set(&data) lib.AtLoadCallback(path, dataPtr) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } + var namedTensors []NamedTensor for _, v := range data.NamedCtensors { namedTensor := NamedTensor{ Name: v.Name, - Tensor: Tensor{v.Ctensor}, + Tensor: &Tensor{v.Ctensor}, } - retVal = append(retVal, namedTensor) + namedTensors = append(namedTensors, namedTensor) } - return retVal, nil + return namedTensors, nil } // MustLoadMulti loads some named tensors from a file. It will panic if error -func MustLoadMulti(path string) (retVal []NamedTensor) { - retVal, err := LoadMulti(path) +func MustLoadMulti(path string) []NamedTensor { + namedTensors, err := LoadMulti(path) if err != nil { log.Fatal(err) } - return retVal + return namedTensors } // LoadMultiWithDevice loads some named tensors from a file to a given device // // The file format is the same as the one used by the PyTorch C++ API. -func LoadMultiWithDevice(path string, device gotch.Device) (retVal []NamedTensor, err error) { +func LoadMultiWithDevice(path string, device gotch.Device) ([]NamedTensor, error) { var data lib.LoadData dataPtr := lib.PStore.Set(&data) lib.AtLoadCallbackWithDevice(path, dataPtr, device.CInt()) - if err = TorchErr(); err != nil { - return retVal, err + if err := TorchErr(); err != nil { + return nil, err } + var namedTensors []NamedTensor for _, v := range data.NamedCtensors { namedTensor := NamedTensor{ Name: v.Name, - Tensor: Tensor{v.Ctensor}, + Tensor: &Tensor{v.Ctensor}, } - retVal = append(retVal, namedTensor) + namedTensors = append(namedTensors, namedTensor) } - return retVal, nil + return namedTensors, nil } // MustLoadMulti loads some named tensors from a file. It will panic if error -func MustLoadMultiWithDevice(path string, device gotch.Device) (retVal []NamedTensor) { - retVal, err := LoadMultiWithDevice(path, device) +func MustLoadMultiWithDevice(path string, device gotch.Device) []NamedTensor { + namedTensors, err := LoadMultiWithDevice(path, device) if err != nil { log.Fatal(err) } - return retVal + return namedTensors } // ToString returns a string representation for the tensor. @@ -850,31 +861,31 @@ func MustLoadMultiWithDevice(path string, device gotch.Device) (retVal []NamedTe // lw : line width (size) // NOTE: The representation will contain all the tensor element hence may be huge for // large tensors. 
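The matching read side, as a sketch: LoadMulti returns the stored names alongside *Tensor values, and the WithDevice variant places them directly on the given device. The path is a placeholder and the import paths are assumed.

package main

import (
	"fmt"
	"log"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	namedTensors, err := tensor.LoadMulti("/tmp/multi.gt")
	if err != nil {
		log.Fatal(err)
	}
	for _, nt := range namedTensors {
		fmt.Printf("%s: %v\n", nt.Name, nt.Tensor.MustSize())
	}

	// Load the same file straight onto a chosen device.
	onCPU := tensor.MustLoadMultiWithDevice("/tmp/multi.gt", gotch.CPU)
	fmt.Println(len(onCPU))
}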
-func (ts Tensor) ToString(lw int64) (retVal string, err error) { - retVal = lib.AtToString(ts.ctensor, lw) - if err = TorchErr(); err != nil { - return retVal, err +func (ts *Tensor) ToString(lw int64) (string, error) { + tensorStr := lib.AtToString(ts.ctensor, lw) + if err := TorchErr(); err != nil { + return "", err } - return retVal, nil + return tensorStr, nil } // MustToString returns a string representation for the tensor. It will be panic // if error. // lw : line width (size) -func (ts Tensor) MustToString(lw int64) (retVal string) { - retVal, err := ts.ToString(lw) +func (ts *Tensor) MustToString(lw int64) string { + tensorStr, err := ts.ToString(lw) if err != nil { log.Fatal(err) } - return retVal + return tensorStr } // Drop drops (frees) the tensor -func (ts Tensor) Drop() (err error) { +func (ts *Tensor) Drop() error { lib.AtFree(ts.ctensor) - if err = TorchErr(); err != nil { + if err := TorchErr(); err != nil { return err } @@ -882,7 +893,7 @@ func (ts Tensor) Drop() (err error) { } // MustDrop drops the tensor. It will be panic if error -func (ts Tensor) MustDrop() { +func (ts *Tensor) MustDrop() { if err := ts.Drop(); err != nil { log.Fatal(err) } @@ -890,7 +901,7 @@ func (ts Tensor) MustDrop() { // GradSetEnabled sets globally whether GradMode gradient accumulation is enable or not. // It returns PREVIOUS state of Grad before setting. -func GradSetEnabled(b bool) (retVal bool, err error) { +func GradSetEnabled(b bool) (bool, error) { var cbool, cretVal int switch b { @@ -900,17 +911,21 @@ func GradSetEnabled(b bool) (retVal bool, err error) { cbool = 0 } + var ( + err error + state bool + ) cretVal = lib.AtGradSetEnabled(cbool) if err = TorchErr(); err != nil { - return retVal, err + return false, err } switch cretVal { case 0: - retVal = false + state = false break case 1: - retVal = true + state = true break // case -1: // should be unreachable as error is captured above with TorchrErr() // err = fmt.Errorf("Cannot set grad enable. \n") @@ -920,18 +935,18 @@ func GradSetEnabled(b bool) (retVal bool, err error) { // return retVal, err } - return retVal, nil + return state, nil } // MustGradSetEnabled sets globally whether GradMode gradient accumuation is enable or not. // It returns PREVIOUS state of Grad before setting. It will be panic if error -func MustGradSetEnabled(b bool) (retVal bool) { - retVal, err := GradSetEnabled(b) +func MustGradSetEnabled(b bool) bool { + state, err := GradSetEnabled(b) if err != nil { log.Fatal(err) } - return retVal + return state } // NoGrad runs a closure without keeping track of gradients. @@ -962,14 +977,14 @@ func NoGrad(fn interface{}) { } -func NoGrad1(fn func() interface{}) (retVal interface{}) { +func NoGrad1(fn func() interface{}) interface{} { newTs := NewTensor() newTs.Drop() // Switch off Grad prev := MustGradSetEnabled(false) - retVal = fn() + retVal := fn() // Switch on Grad _ = MustGradSetEnabled(prev) @@ -990,14 +1005,14 @@ type NoGradGuard struct { } // Init NoGradGuard and disables gradient tracking -func NewNoGradGuard() NoGradGuard { +func NewNoGradGuard() *NoGradGuard { return noGradGuardInit() } // Disables gradient tracking, this will be enabled back when the // returned value gets deallocated. -func noGradGuardInit() NoGradGuard { - return NoGradGuard{enabled: MustGradSetEnabled(false)} +func noGradGuardInit() *NoGradGuard { + return &NoGradGuard{enabled: MustGradSetEnabled(false)} } // Drop drops the NoGradGuard state. 
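A sketch of the gradient-mode helpers above: MustGradSetEnabled returns the previous state so it can be restored afterwards, and NoGrad1 wraps that same toggle around a closure and hands back its result. Import paths are assumed; the multiplication is just an arbitrary gradient-free operation.

package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	// Manual toggle: remember the previous state, then restore it.
	prev := tensor.MustGradSetEnabled(false)
	// ... gradient-free work here ...
	_ = tensor.MustGradSetEnabled(prev)

	// NoGrad1 performs the same toggle around a closure.
	x := tensor.MustZeros([]int64{2, 2}, gotch.Float, gotch.CPU)
	squared := tensor.NoGrad1(func() interface{} {
		return x.MustMul(x, false)
	}).(*tensor.Tensor)
	fmt.Println(squared.MustSize()) // [2 2]
}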
@@ -1025,7 +1040,7 @@ const ( ReductionOther ) -func (r Reduction) ToInt() (retVal int) { +func (r Reduction) ToInt() int { switch r { case ReductionNone: return 0 @@ -1036,11 +1051,13 @@ func (r Reduction) ToInt() (retVal int) { case ReductionOther: return 3 } - return + + // NOTE. should it be panic here instead of returning -1? + return -1 } // Float64Values returns values of tensor in a slice of float64. -func (ts Tensor) Float64Values() []float64 { +func (ts *Tensor) Float64Values() []float64 { numel := ts.Numel() vec := make([]float64, numel) @@ -1053,7 +1070,7 @@ func (ts Tensor) Float64Values() []float64 { } // Int64Values returns values of tensor in a slice of int64. -func (ts Tensor) Int64Values() []int64 { +func (ts *Tensor) Int64Values() []int64 { numel := ts.Numel() vec := make([]int64, numel) @@ -1068,10 +1085,12 @@ func (ts Tensor) Int64Values() []int64 { // Vals returns tensor values in a slice // NOTE: need a type insersion to get runtime type // E.g. res := xs.Vals().([]int64) -func (ts Tensor) Vals() (retVal interface{}) { +func (ts *Tensor) Vals() interface{} { dtype := ts.DType() numel := ts.Numel() + var retVal interface{} + switch dtype.Name() { case "uint8": retVal = make([]uint8, numel) @@ -1101,21 +1120,21 @@ func (ts Tensor) Vals() (retVal interface{}) { // // This returns a flattened version of the given tensor. The first dimension // is preserved as it is assumed to be the mini-batch dimension. -func (ts Tensor) FlatView() (retVal Tensor) { +func (ts *Tensor) FlatView() *Tensor { batchSize := ts.MustSize()[0] return ts.MustView([]int64{batchSize, -1}, false) } -func (ts Tensor) ZeroPad2d(left, right, top, bottom int64, del bool) (retVal Tensor, err error) { +func (ts *Tensor) ZeroPad2d(left, right, top, bottom int64, del bool) (*Tensor, error) { if ts.Dim() != 4 { - err = fmt.Errorf("Expected a 4 dimension tensor, got %v\n", ts.MustSize()) - return retVal, err + err := fmt.Errorf("Expected a 4 dimension tensor, got %v\n", ts.MustSize()) + return nil, err } return ts.ConstantPadNd([]int64{left, right, top, bottom}, del) } -func (ts Tensor) MustZeroPad2d(left, right, top, bottom int64, del bool) (retVal Tensor) { +func (ts *Tensor) MustZeroPad2d(left, right, top, bottom int64, del bool) *Tensor { retVal, err := ts.ZeroPad2d(left, right, top, bottom, del) if err != nil { log.Fatal(err) @@ -1131,32 +1150,32 @@ func (ts Tensor) MustZeroPad2d(left, right, top, bottom int64, del bool) (retVal // Elements of the input vector are expected to be between 0 and labels-1. 
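A sketch of the value-access helpers above, assuming gotch.Float corresponds to Go float32 and the usual import paths; the shape is arbitrary. Vals needs a type assertion, FlatView keeps the first (mini-batch) dimension, and ZeroPad2d only accepts 4-D input.

package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	x := tensor.MustZeros([]int64{2, 3, 4, 4}, gotch.Float, gotch.CPU)

	// Vals returns interface{}; assert to the slice type matching the DType.
	vals := x.Vals().([]float32)
	fmt.Println(len(vals)) // 96

	// FlatView preserves the batch dimension and flattens the rest.
	flat := x.FlatView()
	fmt.Println(flat.MustSize()) // [2 48]

	// ZeroPad2d pads the last two dimensions of a 4-D tensor.
	padded := x.MustZeroPad2d(1, 1, 1, 1, false)
	fmt.Println(padded.MustSize()) // [2 3 6 6]
}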
// // NOTE: There's other `ts.OneHot` and `ts.MustOneHot` generated from Atg C++ API -func (ts Tensor) Onehot(labels int64) (retVal Tensor) { +func (ts *Tensor) Onehot(labels int64) *Tensor { dims := ts.MustSize() dims = append(dims, labels) unsqueezeTs := ts.MustUnsqueeze(-1, false) inputTs := unsqueezeTs.MustTotype(gotch.Int64, true) zerosTs := MustZeros(dims, gotch.Float, gotch.CPU) - retVal = zerosTs.MustScatter1(-1, inputTs, FloatScalar(1.0), true) + retVal := zerosTs.MustScatter1(-1, inputTs, FloatScalar(1.0), true) inputTs.MustDrop() return retVal } -func (ts Tensor) Swish() (retVal Tensor) { +func (ts *Tensor) Swish() *Tensor { sig := ts.MustSigmoid(false) - retVal = ts.MustMul(sig, false) + mulTs := ts.MustMul(sig, false) sig.MustDrop() - return retVal + return mulTs } -func (ts Tensor) AvgPool2DDefault(ksize int64, del bool) (retVal Tensor) { +func (ts *Tensor) AvgPool2DDefault(ksize int64, del bool) *Tensor { return ts.MustAvgPool2d([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, false, true, 1, del) } // SaveMultiNew saves a slice of named tensors to the given file path. -func SaveMultiNew(namedTensors []NamedTensor, path string) (err error) { +func SaveMultiNew(namedTensors []NamedTensor, path string) error { var ( tensors []lib.Ctensor names []string @@ -1168,7 +1187,7 @@ func SaveMultiNew(namedTensors []NamedTensor, path string) (err error) { } lib.AtSaveMultiNew(tensors, names, path) - if err = TorchErr(); err != nil { + if err := TorchErr(); err != nil { return err }