// Source file: gotch/ts/tensor-generated.go
// Snapshot: 2023-07-26 21:43:21 +10:00 (full file: 42567 lines, 1.2 MiB)
package ts
// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
// #include "stdlib.h"
import "C"
import(
"unsafe"
"github.com/sugarme/gotch"
lib "github.com/sugarme/gotch/libtch"
)
// func.returns = `fixed 1`:
// --------------------------
// __And_ applies libtorch's in-place `__and_` op to ts with a scalar operand.
// The C call writes the resulting tensor handle into ptr, and ts.ctensor is
// repointed to it on success.
// NOTE(review): C.malloc(0) allocates zero bytes yet the C side writes a
// handle through ptr — presumably relying on allocator slack; confirm against
// the libtch binding contract.
func(ts *Tensor) __And_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__And_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __AndTensor_ applies libtorch's in-place `__and_` op to ts with a tensor
// operand; ts.ctensor is repointed to the handle the C side writes into ptr.
func(ts *Tensor) __AndTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__AndTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __Iand_ applies libtorch's in-place `__iand_` op to ts with a scalar
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Iand_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __IandTensor_ applies libtorch's in-place `__iand_` op to ts with a tensor
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __IandTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IandTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __Ilshift_ applies libtorch's in-place `__ilshift_` op to ts with a scalar
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Ilshift_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Ilshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __IlshiftTensor_ applies libtorch's in-place `__ilshift_` op to ts with a
// tensor operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __IlshiftTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IlshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __Ior_ applies libtorch's in-place `__ior_` op to ts with a scalar operand;
// ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Ior_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __IorTensor_ applies libtorch's in-place `__ior_` op to ts with a tensor
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __IorTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __Irshift_ applies libtorch's in-place `__irshift_` op to ts with a scalar
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Irshift_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __IrshiftTensor_ applies libtorch's in-place `__irshift_` op to ts with a
// tensor operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __IrshiftTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IrshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __Ixor_ applies libtorch's in-place `__ixor_` op to ts with a scalar
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Ixor_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __IxorTensor_ applies libtorch's in-place `__ixor_` op to ts with a tensor
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __IxorTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IxorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __Lshift_ applies libtorch's in-place `__lshift_` op to ts with a scalar
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Lshift_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __LshiftScalarOut_ calls the out-variant of libtorch's `__lshift_` scalar op
// (result written into out's storage) and wraps the returned handle in a new
// Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) __LshiftScalarOut_(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__LshiftScalarOut_(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// __LshiftTensor_ applies libtorch's in-place `__lshift_` op to ts with a
// tensor operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __LshiftTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__LshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __LshiftTensorOut_ calls the out-variant of libtorch's `__lshift_` tensor op
// and wraps the returned handle in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) __LshiftTensorOut_(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__LshiftTensorOut_(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// __Or_ applies libtorch's in-place `__or_` op to ts with a scalar operand;
// ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Or_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Or_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __OrTensor_ applies libtorch's in-place `__or_` op to ts with a tensor
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __OrTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__OrTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __Rshift_ applies libtorch's in-place `__rshift_` op to ts with a scalar
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Rshift_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __RshiftScalarOut_ calls the out-variant of libtorch's `__rshift_` scalar op
// and wraps the returned handle in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) __RshiftScalarOut_(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__RshiftScalarOut_(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// __RshiftTensor_ applies libtorch's in-place `__rshift_` op to ts with a
// tensor operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __RshiftTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__RshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __RshiftTensorOut_ calls the out-variant of libtorch's `__rshift_` tensor op
// and wraps the returned handle in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) __RshiftTensorOut_(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__RshiftTensorOut_(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// __Xor_ applies libtorch's in-place `__xor_` op to ts with a scalar operand;
// ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __Xor_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// __XorTensor_ applies libtorch's in-place `__xor_` op to ts with a tensor
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) __XorTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__XorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool2d calls libtorch's `_adaptive_avg_pool2d` on ts and wraps
// the result handle in a new Tensor. The outputSize slice is passed to C along
// with its length. If del is true, ts is dropped when this method returns.
func(ts *Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool2dBackward calls the backward of `_adaptive_avg_pool2d` with
// gradOutput and ts (the forward input) and wraps the result in a new Tensor.
// If del is true, ts is dropped when this method returns.
func(ts *Tensor) _AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool2dBackwardOut is the out-variant of _AdaptiveAvgPool2dBackward:
// the result is written into out's storage and wrapped in a new Tensor. If del
// is true, ts is dropped when this method returns.
func(ts *Tensor) _AdaptiveAvgPool2dBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool2dOut is the out-variant of _AdaptiveAvgPool2d: the result is
// written into out's storage and wrapped in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.Atg_AdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool3d calls libtorch's `_adaptive_avg_pool3d` on ts and wraps
// the result handle in a new Tensor. If del is true, ts is dropped when this
// method returns.
func(ts *Tensor) _AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.Atg_AdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool3dBackward calls the backward of `_adaptive_avg_pool3d` with
// gradOutput and ts (the forward input) and wraps the result in a new Tensor.
// If del is true, ts is dropped when this method returns.
func(ts *Tensor) _AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool3dBackwardOut is the out-variant of _AdaptiveAvgPool3dBackward:
// the result is written into out's storage and wrapped in a new Tensor. If del
// is true, ts is dropped when this method returns.
func(ts *Tensor) _AdaptiveAvgPool3dBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool3dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AdaptiveAvgPool3dOut is the out-variant of _AdaptiveAvgPool3d: the result is
// written into out's storage and wrapped in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.Atg_AdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddBatchDim calls libtorch's `_add_batch_dim` on ts with batchDim and level
// and wraps the result in a new Tensor. If del is true, ts is dropped when this
// method returns.
func(ts *Tensor) _AddBatchDim(batchDim int64, level int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddBatchDim(ptr, ts.ctensor, batchDim, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddRelu calls libtorch's `_add_relu` on ts and other and wraps the result
// in a new Tensor. If del is true, ts is dropped when this method returns.
func(ts *Tensor) _AddRelu(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddRelu(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddRelu_ applies libtorch's in-place `_add_relu_` op to ts with a tensor
// operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) _AddRelu_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddRelu_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddReluOut is the out-variant of _AddRelu: the result is written into out's
// storage and wrapped in a new Tensor. If del is true, ts is dropped when this
// method returns.
func(ts *Tensor) _AddReluOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddReluOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddReluScalar calls libtorch's `_add_relu` with a scalar operand and wraps
// the result in a new Tensor. If del is true, ts is dropped when this method
// returns.
func(ts *Tensor) _AddReluScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddReluScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddReluScalar_ applies libtorch's in-place `_add_relu_` op to ts with a
// scalar operand; ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) _AddReluScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddReluScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddReluScalarOut is the out-variant of _AddReluScalar: the result is written
// into out's storage and wrapped in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _AddReluScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddReluScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddmmActivation calls libtorch's `_addmm_activation` with ts, mat1 and mat2;
// useGelu is converted to the int32 flag (0/1) the C ABI expects. The result is
// wrapped in a new Tensor. If del is true, ts is dropped when this method
// returns.
func(ts *Tensor) _AddmmActivation(mat1 *Tensor, mat2 *Tensor, useGelu bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cuseGelu := int32(0)
if useGelu { cuseGelu = int32(1) }
lib.Atg_AddmmActivation(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor, cuseGelu)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AddmmActivationOut is the out-variant of _AddmmActivation: the result is
// written into out's storage and wrapped in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _AddmmActivationOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, useGelu bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cuseGelu := int32(0)
if useGelu { cuseGelu = int32(1) }
lib.Atg_AddmmActivationOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor, cuseGelu)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _Aminmax calls libtorch's `_aminmax` on ts, returning two tensors.
// ctensorPtr1 is derived by raw pointer arithmetic one handle past ctensorPtr0:
// the C side is expected to write two consecutive Ctensor handles starting at
// ctensorPtr0.
// NOTE(review): only C.malloc(0) backs both slots — relies on allocator slack;
// confirm against the libtch binding contract.
// If del is true, ts is dropped when this method returns.
func(ts *Tensor) _Aminmax(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_Aminmax(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _AminmaxDim calls libtorch's `_aminmax` along dim, returning two tensors read
// from two consecutive handle slots starting at ctensorPtr0 (see _Aminmax for
// the pointer-arithmetic pattern). keepdim is passed as an int32 flag. If del
// is true, ts is dropped when this method returns.
func(ts *Tensor) _AminmaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.Atg_AminmaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _AminmaxDimOut is the out-variant of _AminmaxDim: results are written into
// out0/out1 and two handles are read from consecutive slots starting at
// ctensorPtr0. If del is true, ts is dropped when this method returns.
func(ts *Tensor) _AminmaxDimOut(out0 *Tensor, out1 *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.Atg_AminmaxDimOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _AminmaxOut is the out-variant of _Aminmax: results are written into
// out0/out1 and two handles are read from consecutive slots starting at
// ctensorPtr0. If del is true, ts is dropped when this method returns.
func(ts *Tensor) _AminmaxOut(out0 *Tensor, out1 *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_AminmaxOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _AmpUpdateScale calls libtorch's `_amp_update_scale` with ts, growthTracker
// and foundInf plus the scale-growth parameters, returning two tensors read
// from consecutive handle slots starting at ctensorPtr0. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _AmpUpdateScale(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_AmpUpdateScale(ctensorPtr0, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AmpUpdateScale_ applies libtorch's in-place `_amp_update_scale_` op to ts;
// ts.ctensor is repointed to the handle written into ptr.
func(ts *Tensor) _AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AmpUpdateScale_(ptr, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _AmpUpdateScaleOut is the out-variant of `_amp_update_scale`: the result is
// written into out's storage and wrapped in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _AmpUpdateScaleOut(out *Tensor, growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AmpUpdateScaleOut(ptr, out.ctensor, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AutocastToFullPrecision calls libtorch's `_autocast_to_full_precision` on
// ts; the two bool flags are converted to int32 (0/1) for the C ABI. The result
// is wrapped in a new Tensor. If del is true, ts is dropped when this method
// returns.
func(ts *Tensor) _AutocastToFullPrecision(cudaEnabled bool, cpuEnabled bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccudaEnabled := int32(0)
if cudaEnabled { ccudaEnabled = int32(1) }
ccpuEnabled := int32(0)
if cpuEnabled { ccpuEnabled = int32(1) }
lib.Atg_AutocastToFullPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _AutocastToReducedPrecision calls libtorch's `_autocast_to_reduced_precision`
// on ts; bool flags become int32 (0/1) and the two DTypes are passed via their
// CInt codes. The result is wrapped in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _AutocastToReducedPrecision(cudaEnabled bool, cpuEnabled bool, cudaDtype gotch.DType, cpuDtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccudaEnabled := int32(0)
if cudaEnabled { ccudaEnabled = int32(1) }
ccpuEnabled := int32(0)
if cpuEnabled { ccpuEnabled = int32(1) }
lib.Atg_AutocastToReducedPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled, cudaDtype.CInt(), cpuDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastByte calls libtorch's `_cast_Byte` on ts (nonBlocking passed as an int32
// flag) and wraps the result in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _CastByte(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastChar calls libtorch's `_cast_Char` on ts (nonBlocking passed as an int32
// flag) and wraps the result in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _CastChar(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastDouble calls libtorch's `_cast_Double` on ts (nonBlocking passed as an
// int32 flag) and wraps the result in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _CastDouble(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastFloat calls libtorch's `_cast_Float` on ts (nonBlocking passed as an
// int32 flag) and wraps the result in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _CastFloat(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastHalf calls libtorch's `_cast_Half` on ts (nonBlocking passed as an int32
// flag) and wraps the result in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _CastHalf(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastInt calls libtorch's `_cast_Int` on ts (nonBlocking passed as an int32
// flag) and wraps the result in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _CastInt(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastLong calls libtorch's `_cast_Long` on ts (nonBlocking passed as an int32
// flag) and wraps the result in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _CastLong(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CastShort calls libtorch's `_cast_Short` on ts (nonBlocking passed as an
// int32 flag) and wraps the result in a new Tensor. If del is true, ts is
// dropped when this method returns.
func(ts *Tensor) _CastShort(nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CdistBackward calls libtorch's `_cdist_backward` (free function, no
// receiver) with grad, x1, x2, p and the forward cdist result, wrapping the
// returned handle in a new Tensor.
func _CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CdistBackwardOut is the out-variant of _CdistBackward: the result is written
// into out's storage and wrapped in a new Tensor.
func _CdistBackwardOut(out *Tensor, grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_CdistBackwardOut(ptr, out.ctensor, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CholeskySolveHelper calls libtorch's `_cholesky_solve_helper` with ts and a
// (upper passed as an int32 flag) and wraps the result in a new Tensor. If del
// is true, ts is dropped when this method returns.
func(ts *Tensor) _CholeskySolveHelper(a *Tensor, upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper { cupper = int32(1) }
lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CholeskySolveHelperOut is the out-variant of _CholeskySolveHelper: the
// result is written into out's storage and wrapped in a new Tensor. If del is
// true, ts is dropped when this method returns.
func(ts *Tensor) _CholeskySolveHelperOut(out *Tensor, a *Tensor, upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper { cupper = int32(1) }
lib.Atg_CholeskySolveHelperOut(ptr, out.ctensor, ts.ctensor, a.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// _ChunkGradOutputsEfficientAttention calls libtorch's
// `_chunk_grad_outputs_efficient_attention` and returns its bool result
// directly (no tensor handle is allocated; isCausal is passed as an int32
// flag).
func _ChunkGradOutputsEfficientAttention(query *Tensor, key *Tensor, value *Tensor, isCausal bool)(retVal bool, err error) {
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
retVal = lib.Atg_ChunkGradOutputsEfficientAttention(query.ctensor, key.ctensor, value.ctensor, cisCausal)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Coalesce calls libtorch's `_coalesce` on ts and wraps the result in a new
// Tensor. If del is true, ts is dropped when this method returns.
func(ts *Tensor) _Coalesce(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Coalesce(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CoalesceOut is the out-variant of _Coalesce: the result is written into
// out's storage and wrapped in a new Tensor. If del is true, ts is dropped when
// this method returns.
func(ts *Tensor) _CoalesceOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_CoalesceOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Coalesced calls libtorch's `_coalesced` on ts (coalesced passed as an int32
// flag) and wraps the result in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _Coalesced(coalesced bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccoalesced := int32(0)
if coalesced { ccoalesced = int32(1) }
lib.Atg_Coalesced(ptr, ts.ctensor, ccoalesced)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Coalesced_ applies libtorch's in-place `_coalesced_` op to ts (coalesced
// passed as an int32 flag); ts.ctensor is repointed to the handle written into
// ptr.
func(ts *Tensor) _Coalesced_(coalesced bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccoalesced := int32(0)
if coalesced { ccoalesced = int32(1) }
lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _CoalescedOut is the out-variant of _Coalesced: the result is written into
// out's storage and wrapped in a new Tensor. If del is true, ts is dropped when
// this method returns.
func(ts *Tensor) _CoalescedOut(out *Tensor, coalesced bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccoalesced := int32(0)
if coalesced { ccoalesced = int32(1) }
lib.Atg_CoalescedOut(ptr, out.ctensor, ts.ctensor, ccoalesced)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ComputeLinearCombination calls libtorch's `_compute_linear_combination`
// (free function) with input and coefficients, wrapping the returned handle in
// a new Tensor.
func _ComputeLinearCombination(input *Tensor, coefficients *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ComputeLinearCombination(ptr, input.ctensor, coefficients.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ComputeLinearCombinationOut is the out-variant of _ComputeLinearCombination:
// the result is written into out's storage and wrapped in a new Tensor.
func _ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ComputeLinearCombinationOut(ptr, out.ctensor, input.ctensor, coefficients.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Conj calls libtorch's `_conj` on ts and wraps the result in a new Tensor.
// If del is true, ts is dropped when this method returns.
func(ts *Tensor) _Conj(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Conj(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConjCopy calls libtorch's `_conj_copy` on ts and wraps the result in a new
// Tensor. If del is true, ts is dropped when this method returns.
func(ts *Tensor) _ConjCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConjCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConjCopyOut is the out-variant of _ConjCopy: the result is written into
// out's storage and wrapped in a new Tensor. If del is true, ts is dropped when
// this method returns.
func(ts *Tensor) _ConjCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConjCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConjPhysical calls libtorch's `_conj_physical` on ts and wraps the result in
// a new Tensor. If del is true, ts is dropped when this method returns.
func(ts *Tensor) _ConjPhysical(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConjPhysical(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConjPhysicalOut is the out-variant of _ConjPhysical: the result is written
// into out's storage and wrapped in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _ConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConjPhysicalOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvDepthwise2d calls libtorch's `_conv_depthwise2d` with ts, weight, bias
// and the kernelSize/stride/padding/dilation slices (each passed with its
// length) and wraps the result in a new Tensor. If del is true, ts is dropped
// when this method returns.
func(ts *Tensor) _ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.Atg_ConvDepthwise2d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvDepthwise2dOut wraps lib.Atg_ConvDepthwise2dOut, writing into `out`.
// Fix: result slot sized for one C pointer (was malloc(0)) and freed after
// copy-out. del: drop the receiver after the call.
func (ts *Tensor) _ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
	if del {
		defer ts.MustDrop()
	}
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	kernelSizeLen := len(kernelSize)
	strideLen := len(stride)
	paddingLen := len(padding)
	dilationLen := len(dilation)
	lib.Atg_ConvDepthwise2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvertIndicesFromCooToCsr wraps lib.Atg_ConvertIndicesFromCooToCsr,
// mapping the Go bool to a C int flag. Fix: result slot sized for one C
// pointer (was malloc(0)) and freed after copy-out. del: drop the receiver.
func (ts *Tensor) _ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool) (retVal *Tensor, err error) {
	if del {
		defer ts.MustDrop()
	}
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	coutInt32 := int32(0)
	if outInt32 {
		coutInt32 = int32(1)
	}
	lib.Atg_ConvertIndicesFromCooToCsr(ptr, ts.ctensor, size, coutInt32)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvertIndicesFromCooToCsrOut wraps lib.Atg_ConvertIndicesFromCooToCsrOut,
// writing into `out`. Fix: result slot sized for one C pointer (was malloc(0))
// and freed after copy-out. del: drop the receiver after the call.
func (ts *Tensor) _ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool) (retVal *Tensor, err error) {
	if del {
		defer ts.MustDrop()
	}
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	coutInt32 := int32(0)
	if outInt32 {
		coutInt32 = int32(1)
	}
	lib.Atg_ConvertIndicesFromCooToCsrOut(ptr, out.ctensor, ts.ctensor, size, coutInt32)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvertIndicesFromCsrToCoo wraps lib.Atg_ConvertIndicesFromCsrToCoo,
// mapping the Go bools to C int flags. Fix: result slot sized for one C
// pointer (was malloc(0)) and freed after copy-out.
func _ConvertIndicesFromCsrToCoo(crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	coutInt32 := int32(0)
	if outInt32 {
		coutInt32 = int32(1)
	}
	ctranspose := int32(0)
	if transpose {
		ctranspose = int32(1)
	}
	lib.Atg_ConvertIndicesFromCsrToCoo(ptr, crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvertIndicesFromCsrToCooOut wraps lib.Atg_ConvertIndicesFromCsrToCooOut,
// writing into `out`. Fix: result slot sized for one C pointer (was malloc(0))
// and freed after copy-out.
func _ConvertIndicesFromCsrToCooOut(out *Tensor, crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	coutInt32 := int32(0)
	if outInt32 {
		coutInt32 = int32(1)
	}
	ctranspose := int32(0)
	if transpose {
		ctranspose = int32(1)
	}
	lib.Atg_ConvertIndicesFromCsrToCooOut(ptr, out.ctensor, crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Convolution wraps lib.Atg_Convolution, converting the Go bool flags to C
// int flags and forwarding the size slices with their lengths. Fix: result
// slot sized for one C pointer (was malloc(0)) and freed after copy-out.
func _Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	strideLen := len(stride)
	paddingLen := len(padding)
	dilationLen := len(dilation)
	ctransposed := int32(0)
	if transposed {
		ctransposed = int32(1)
	}
	outputPaddingLen := len(outputPadding)
	cbenchmark := int32(0)
	if benchmark {
		cbenchmark = int32(1)
	}
	cdeterministic := int32(0)
	if deterministic {
		cdeterministic = int32(1)
	}
	ccudnnEnabled := int32(0)
	if cudnnEnabled {
		ccudnnEnabled = int32(1)
	}
	callowTf32 := int32(0)
	if allowTf32 {
		callowTf32 = int32(1)
	}
	lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvolutionDeprecated wraps lib.Atg_ConvolutionDeprecated (legacy variant
// without the allowTf32 flag). Fix: result slot sized for one C pointer (was
// malloc(0)) and freed after copy-out.
func _ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	strideLen := len(stride)
	paddingLen := len(padding)
	dilationLen := len(dilation)
	ctransposed := int32(0)
	if transposed {
		ctransposed = int32(1)
	}
	outputPaddingLen := len(outputPadding)
	cbenchmark := int32(0)
	if benchmark {
		cbenchmark = int32(1)
	}
	cdeterministic := int32(0)
	if deterministic {
		cdeterministic = int32(1)
	}
	ccudnnEnabled := int32(0)
	if cudnnEnabled {
		ccudnnEnabled = int32(1)
	}
	lib.Atg_ConvolutionDeprecated(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups, cbenchmark, cdeterministic, ccudnnEnabled)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvolutionMode wraps lib.Atg_ConvolutionMode; padding is passed as a
// string mode. Fix: result slot sized for one C pointer (was malloc(0)) and
// freed after copy-out.
func _ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	strideLen := len(stride)
	dilationLen := len(dilation)
	lib.Atg_ConvolutionMode(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ConvolutionOut wraps lib.Atg_ConvolutionOut, writing into `out`. Fix:
// result slot sized for one C pointer (was malloc(0)) and freed after
// copy-out.
func _ConvolutionOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	strideLen := len(stride)
	paddingLen := len(padding)
	dilationLen := len(dilation)
	ctransposed := int32(0)
	if transposed {
		ctransposed = int32(1)
	}
	outputPaddingLen := len(outputPadding)
	cbenchmark := int32(0)
	if benchmark {
		cbenchmark = int32(1)
	}
	cdeterministic := int32(0)
	if deterministic {
		cdeterministic = int32(1)
	}
	ccudnnEnabled := int32(0)
	if cudnnEnabled {
		ccudnnEnabled = int32(1)
	}
	callowTf32 := int32(0)
	if allowTf32 {
		callowTf32 = int32(1)
	}
	lib.Atg_ConvolutionOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CopyFrom wraps lib.Atg_CopyFrom, mapping nonBlocking to a C int flag. Fix:
// result slot sized for one C pointer (was malloc(0)) and freed after
// copy-out. del: drop the receiver after the call.
func (ts *Tensor) _CopyFrom(dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor, err error) {
	if del {
		defer ts.MustDrop()
	}
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	cnonBlocking := int32(0)
	if nonBlocking {
		cnonBlocking = int32(1)
	}
	lib.Atg_CopyFrom(ptr, ts.ctensor, dst.ctensor, cnonBlocking)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CopyFromAndResize wraps lib.Atg_CopyFromAndResize. Fix: result slot sized
// for one C pointer (was malloc(0)) and freed after copy-out. del: drop the
// receiver after the call.
func (ts *Tensor) _CopyFromAndResize(dst *Tensor, del bool) (retVal *Tensor, err error) {
	if del {
		defer ts.MustDrop()
	}
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	lib.Atg_CopyFromAndResize(ptr, ts.ctensor, dst.ctensor)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CopyFromAndResizeOut wraps lib.Atg_CopyFromAndResizeOut, writing into
// `out`. Fix: result slot sized for one C pointer (was malloc(0)) and freed
// after copy-out. del: drop the receiver after the call.
func (ts *Tensor) _CopyFromAndResizeOut(out *Tensor, dst *Tensor, del bool) (retVal *Tensor, err error) {
	if del {
		defer ts.MustDrop()
	}
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	lib.Atg_CopyFromAndResizeOut(ptr, out.ctensor, ts.ctensor, dst.ctensor)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CopyFromOut wraps lib.Atg_CopyFromOut, writing into `out` and mapping
// nonBlocking to a C int flag. Fix: result slot sized for one C pointer (was
// malloc(0)) and freed after copy-out. del: drop the receiver after the call.
func (ts *Tensor) _CopyFromOut(out *Tensor, dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor, err error) {
	if del {
		defer ts.MustDrop()
	}
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	cnonBlocking := int32(0)
	if nonBlocking {
		cnonBlocking = int32(1)
	}
	lib.Atg_CopyFromOut(ptr, out.ctensor, ts.ctensor, dst.ctensor, cnonBlocking)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CtcLoss wraps lib.Atg_CtcLoss, which writes two tensor handles into
// consecutive pointer slots. Fix: the buffer is sized for two C pointers — the
// original malloc(0) meant BOTH writes landed out of bounds — and is freed
// once the handles have been copied out.
func _CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	inputLengthsLen := len(inputLengths)
	targetLengthsLen := len(targetLengths)
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CtcLossBackward wraps lib.Atg_CtcLossBackward. Fix: result slot sized for
// one C pointer (was malloc(0)) and freed after copy-out.
func _CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	inputLengthsLen := len(inputLengths)
	targetLengthsLen := len(targetLengths)
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CtcLossBackwardOut wraps lib.Atg_CtcLossBackwardOut, writing into `out`.
// Fix: result slot sized for one C pointer (was malloc(0)) and freed after
// copy-out.
func _CtcLossBackwardOut(out *Tensor, grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	inputLengthsLen := len(inputLengths)
	targetLengthsLen := len(targetLengths)
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CtcLossBackwardOut(ptr, out.ctensor, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CtcLossBackwardTensor wraps lib.Atg_CtcLossBackwardTensor (lengths passed
// as tensors rather than slices). Fix: result slot sized for one C pointer
// (was malloc(0)) and freed after copy-out.
func _CtcLossBackwardTensor(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CtcLossBackwardTensor(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CtcLossOut wraps lib.Atg_CtcLossOut, which writes two tensor handles into
// consecutive pointer slots. Fix: buffer sized for two C pointers (was
// malloc(0)) and freed after copy-out.
func _CtcLossOut(out0 *Tensor, out1 *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	inputLengthsLen := len(inputLengths)
	targetLengthsLen := len(targetLengths)
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CtcLossOut(ctensorPtr0, out0.ctensor, out1.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CtcLossTensor wraps lib.Atg_CtcLossTensor (lengths passed as tensors),
// which writes two tensor handles into consecutive pointer slots. Fix: buffer
// sized for two C pointers (was malloc(0)) and freed after copy-out.
func _CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CtcLossTensor(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CtcLossTensorOut wraps lib.Atg_CtcLossTensorOut, which writes two tensor
// handles into consecutive pointer slots. Fix: buffer sized for two C pointers
// (was malloc(0)) and freed after copy-out.
func _CtcLossTensorOut(out0 *Tensor, out1 *Tensor, logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CtcLossTensorOut(ctensorPtr0, out0.ctensor, out1.ctensor, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CudnnCtcLoss wraps lib.Atg_CudnnCtcLoss, which writes two tensor handles
// into consecutive pointer slots. Fix: buffer sized for two C pointers (was
// malloc(0)) and freed after copy-out.
func _CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	inputLengthsLen := len(inputLengths)
	targetLengthsLen := len(targetLengths)
	cdeterministic := int32(0)
	if deterministic {
		cdeterministic = int32(1)
	}
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CudnnCtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, cdeterministic, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CudnnCtcLossOut wraps lib.Atg_CudnnCtcLossOut, which writes two tensor
// handles into consecutive pointer slots. Fix: buffer sized for two C pointers
// (was malloc(0)) and freed after copy-out.
func _CudnnCtcLossOut(out0 *Tensor, out1 *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	inputLengthsLen := len(inputLengths)
	targetLengthsLen := len(targetLengths)
	cdeterministic := int32(0)
	if deterministic {
		cdeterministic = int32(1)
	}
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CudnnCtcLossOut(ctensorPtr0, out0.ctensor, out1.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, cdeterministic, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CudnnCtcLossTensor wraps lib.Atg_CudnnCtcLossTensor (lengths as tensors),
// which writes two tensor handles into consecutive pointer slots. Fix: buffer
// sized for two C pointers (was malloc(0)) and freed after copy-out.
func _CudnnCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, deterministic bool, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	cdeterministic := int32(0)
	if deterministic {
		cdeterministic = int32(1)
	}
	czeroInfinity := int32(0)
	if zeroInfinity {
		czeroInfinity = int32(1)
	}
	lib.Atg_CudnnCtcLossTensor(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, cdeterministic, czeroInfinity)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CudnnInitDropoutState wraps lib.Atg_CudnnInitDropoutState, converting the
// gotch kind/device options to their C ints. Fix: result slot sized for one C
// pointer (was malloc(0)) and freed after copy-out.
func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	ctrain := int32(0)
	if train {
		ctrain = int32(1)
	}
	lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt())
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CudnnInitDropoutStateOut wraps lib.Atg_CudnnInitDropoutStateOut, writing
// into `out`. Fix: result slot sized for one C pointer (was malloc(0)) and
// freed after copy-out.
func _CudnnInitDropoutStateOut(out *Tensor, dropout float64, train bool, dropoutSeed int64) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	ctrain := int32(0)
	if train {
		ctrain = int32(1)
	}
	lib.Atg_CudnnInitDropoutStateOut(ptr, out.ctensor, dropout, ctrain, dropoutSeed)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CudnnRnn wraps lib.Atg_CudnnRnn, which writes FIVE tensor handles into
// consecutive pointer slots. Fix: the buffer is sized for five C pointers —
// the original malloc(0) meant all five writes landed out of bounds — and is
// freed once the handles have been copied out.
func _CudnnRnn(input *Tensor, weight []*Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(5 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
	var cweight []lib.Ctensor
	for _, t := range weight {
		cweight = append(cweight, t.ctensor)
	}
	cbatchFirst := int32(0)
	if batchFirst {
		cbatchFirst = int32(1)
	}
	ctrain := int32(0)
	if train {
		ctrain = int32(1)
	}
	cbidirectional := int32(0)
	if bidirectional {
		cbidirectional = int32(1)
	}
	batchSizesLen := len(batchSizes)
	lib.Atg_CudnnRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, weightBuf.ctensor, hx.ctensor, cx.ctensor, mode, hiddenSize, projSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, retVal2, retVal3, retVal4, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	retVal2 = &Tensor{ctensor: *ctensorPtr2}
	retVal3 = &Tensor{ctensor: *ctensorPtr3}
	retVal4 = &Tensor{ctensor: *ctensorPtr4}
	return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CudnnRnnFlattenWeight wraps lib.Atg_CudnnRnnFlattenWeight, flattening the
// weight tensor slice into C handles. Fix: result slot sized for one C
// pointer (was malloc(0)) and freed after copy-out.
func _CudnnRnnFlattenWeight(weightArr []*Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	var cweightArr []lib.Ctensor
	for _, t := range weightArr {
		cweightArr = append(cweightArr, t.ctensor)
	}
	cbatchFirst := int32(0)
	if batchFirst {
		cbatchFirst = int32(1)
	}
	cbidirectional := int32(0)
	if bidirectional {
		cbidirectional = int32(1)
	}
	lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, cbatchFirst, cbidirectional)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _CudnnRnnFlattenWeightOut wraps lib.Atg_CudnnRnnFlattenWeightOut, writing
// into `out`. Fix: result slot sized for one C pointer (was malloc(0)) and
// freed after copy-out.
func _CudnnRnnFlattenWeightOut(out *Tensor, weightArr []*Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	var cweightArr []lib.Ctensor
	for _, t := range weightArr {
		cweightArr = append(cweightArr, t.ctensor)
	}
	cbatchFirst := int32(0)
	if batchFirst {
		cbatchFirst = int32(1)
	}
	cbidirectional := int32(0)
	if bidirectional {
		cbidirectional = int32(1)
	}
	lib.Atg_CudnnRnnFlattenWeightOut(ptr, out.ctensor, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, cbatchFirst, cbidirectional)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _CudnnRnnOut wraps lib.Atg_CudnnRnnOut, which writes FIVE tensor handles
// into consecutive pointer slots. Fix: buffer sized for five C pointers (was
// malloc(0), so all five writes were out of bounds) and freed after copy-out.
func _CudnnRnnOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, input *Tensor, weight []*Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(5 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
	var cweight []lib.Ctensor
	for _, t := range weight {
		cweight = append(cweight, t.ctensor)
	}
	cbatchFirst := int32(0)
	if batchFirst {
		cbatchFirst = int32(1)
	}
	ctrain := int32(0)
	if train {
		ctrain = int32(1)
	}
	cbidirectional := int32(0)
	if bidirectional {
		cbidirectional = int32(1)
	}
	batchSizesLen := len(batchSizes)
	lib.Atg_CudnnRnnOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, input.ctensor, cweight, len(cweight), weightStride0, weightBuf.ctensor, hx.ctensor, cx.ctensor, mode, hiddenSize, projSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, retVal2, retVal3, retVal4, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	retVal2 = &Tensor{ctensor: *ctensorPtr2}
	retVal3 = &Tensor{ctensor: *ctensorPtr3}
	retVal4 = &Tensor{ctensor: *ctensorPtr4}
	return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
// func.returns = `int64`:
// --------------------------
// _CufftGetPlanCacheMaxSize calls lib.Atg_CufftGetPlanCacheMaxSize for the
// given device index and surfaces any pending Torch error.
func _CufftGetPlanCacheMaxSize(deviceIndex int64) (retVal int64, err error) {
	retVal = lib.Atg_CufftGetPlanCacheMaxSize(deviceIndex)
	err = TorchErr()
	return retVal, err
}
// func.returns = `int64`:
// --------------------------
// _CufftGetPlanCacheSize calls lib.Atg_CufftGetPlanCacheSize for the given
// device index and surfaces any pending Torch error.
func _CufftGetPlanCacheSize(deviceIndex int64) (retVal int64, err error) {
	retVal = lib.Atg_CufftGetPlanCacheSize(deviceIndex)
	err = TorchErr()
	return retVal, err
}
// func.returns = `int64`:
// --------------------------
// _DebugHasInternalOverlap calls lib.Atg_DebugHasInternalOverlap on the
// receiver and surfaces any pending Torch error. del: drop the receiver
// after the call.
func (ts *Tensor) _DebugHasInternalOverlap(del bool) (retVal int64, err error) {
	if del {
		defer ts.MustDrop()
	}
	retVal = lib.Atg_DebugHasInternalOverlap(ts.ctensor)
	err = TorchErr()
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _DimArange wraps lib.Atg_DimArange. Fix: result slot sized for one C
// pointer (was malloc(0)) and freed after copy-out.
func _DimArange(like *Tensor, dim int64) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	lib.Atg_DimArange(ptr, like.ctensor, dim)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `int64`:
// --------------------------
// _Dimi calls lib.Atg_Dimi on the receiver and surfaces any pending Torch
// error. del: drop the receiver after the call.
func (ts *Tensor) _Dimi(del bool) (retVal int64, err error) {
	if del {
		defer ts.MustDrop()
	}
	retVal = lib.Atg_Dimi(ts.ctensor)
	err = TorchErr()
	return retVal, err
}
// func.returns = `int64`:
// --------------------------
// _Dimv calls lib.Atg_Dimv on the receiver and surfaces any pending Torch
// error. del: drop the receiver after the call.
func (ts *Tensor) _Dimv(del bool) (retVal int64, err error) {
	if del {
		defer ts.MustDrop()
	}
	retVal = lib.Atg_Dimv(ts.ctensor)
	err = TorchErr()
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _DirichletGrad wraps lib.Atg_DirichletGrad. Fix: result slot sized for one
// C pointer (was malloc(0)) and freed after copy-out.
func _DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _DirichletGradOut wraps lib.Atg_DirichletGradOut, writing into `out`. Fix:
// result slot sized for one C pointer (was malloc(0)) and freed after
// copy-out.
func _DirichletGradOut(out *Tensor, x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	lib.Atg_DirichletGradOut(ptr, out.ctensor, x.ctensor, alpha.ctensor, total.ctensor)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _EfficientAttentionBackward wraps lib.Atg_EfficientAttentionBackward, which
// writes three tensor handles into consecutive pointer slots. Fix: buffer
// sized for three C pointers (was malloc(0)) and freed after copy-out.
func _EfficientAttentionBackward(gradOut_ *Tensor, query *Tensor, key *Tensor, value *Tensor, out *Tensor, logsumexp *Tensor, isCausal bool, chunkGradOutputs bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
	cisCausal := int32(0)
	if isCausal {
		cisCausal = int32(1)
	}
	cchunkGradOutputs := int32(0)
	if chunkGradOutputs {
		cchunkGradOutputs = int32(1)
	}
	lib.Atg_EfficientAttentionBackward(ctensorPtr0, gradOut_.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cisCausal, cchunkGradOutputs)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, retVal2, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	retVal2 = &Tensor{ctensor: *ctensorPtr2}
	return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Efficientzerotensor wraps lib.Atg_Efficientzerotensor, converting the
// gotch kind/device options to their C ints. Fix: result slot sized for one C
// pointer (was malloc(0)) and freed after copy-out.
func _Efficientzerotensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	sizeLen := len(size)
	lib.Atg_Efficientzerotensor(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EfficientzerotensorOut wraps lib.Atg_EfficientzerotensorOut, writing into
// `out`. Fix: result slot sized for one C pointer (was malloc(0)) and freed
// after copy-out.
func _EfficientzerotensorOut(out *Tensor, size []int64) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	sizeLen := len(size)
	lib.Atg_EfficientzerotensorOut(ptr, out.ctensor, size, sizeLen)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _EmbeddingBag wraps lib.Atg_EmbeddingBag, which writes FOUR tensor handles
// into consecutive pointer slots. Fix: buffer sized for four C pointers (was
// malloc(0), so all four writes were out of bounds) and freed after copy-out.
func _EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
	ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(4 * unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ctensorPtr0))
	ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
	ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
	cscaleGradByFreq := int32(0)
	if scaleGradByFreq {
		cscaleGradByFreq = int32(1)
	}
	csparse := int32(0)
	if sparse {
		csparse = int32(1)
	}
	cincludeLastOffset := int32(0)
	if includeLastOffset {
		cincludeLastOffset = int32(1)
	}
	lib.Atg_EmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
	if err = TorchErr(); err != nil {
		return retVal0, retVal1, retVal2, retVal3, err
	}
	retVal0 = &Tensor{ctensor: *ctensorPtr0}
	retVal1 = &Tensor{ctensor: *ctensorPtr1}
	retVal2 = &Tensor{ctensor: *ctensorPtr2}
	retVal3 = &Tensor{ctensor: *ctensorPtr3}
	return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmbeddingBagBackward wraps lib.Atg_EmbeddingBagBackward. Fix: result slot
// sized for one C pointer (was malloc(0)) and freed after copy-out.
func _EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	cscaleGradByFreq := int32(0)
	if scaleGradByFreq {
		cscaleGradByFreq = int32(1)
	}
	csparse := int32(0)
	if sparse {
		csparse = int32(1)
	}
	lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, paddingIdx)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmbeddingBagDenseBackward wraps lib.Atg_EmbeddingBagDenseBackward. Fix:
// result slot sized for one C pointer (was malloc(0)) and freed after
// copy-out.
func _EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) {
	ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
	defer C.free(unsafe.Pointer(ptr))
	cscaleGradByFreq := int32(0)
	if scaleGradByFreq {
		cscaleGradByFreq = int32(1)
	}
	lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
	if err = TorchErr(); err != nil {
		return retVal, err
	}
	retVal = &Tensor{ctensor: *ptr}
	return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmbeddingBagDenseBackwardOut is the `out=`-style variant of
// _EmbeddingBagDenseBackward: it passes an explicit output tensor to
// lib.Atg_EmbeddingBagDenseBackwardOut and returns the result tensor.
func _EmbeddingBagDenseBackwardOut(out *Tensor, grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
lib.Atg_EmbeddingBagDenseBackwardOut(ptr, out.ctensor, grad.ctensor, indices.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _EmbeddingBagForwardOnly wraps lib.Atg_EmbeddingBagForwardOnly, which
// produces four result tensors written into consecutive handle slots.
func _EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// NOTE(review): four handles are written into consecutive slots after a
// C.malloc(0); the allocation size looks undersized — confirm against the
// binding generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
cincludeLastOffset := int32(0)
if includeLastOffset { cincludeLastOffset = int32(1) }
lib.Atg_EmbeddingBagForwardOnly(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _EmbeddingBagForwardOnlyOut is the `out=`-style variant of
// _EmbeddingBagForwardOnly: callers supply four output tensors and four new
// result tensors are still returned.
func _EmbeddingBagForwardOnlyOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// NOTE(review): four handles written past a C.malloc(0) allocation —
// looks undersized; confirm against the binding generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
cincludeLastOffset := int32(0)
if includeLastOffset { cincludeLastOffset = int32(1) }
lib.Atg_EmbeddingBagForwardOnlyOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _EmbeddingBagOut wraps lib.Atg_EmbeddingBagOut (the `out=` variant of
// _embedding_bag); it takes four output tensors and returns four result
// tensors, or the error reported by TorchErr.
func _EmbeddingBagOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// NOTE(review): four handles written past a C.malloc(0) allocation —
// looks undersized; confirm against the binding generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
cincludeLastOffset := int32(0)
if includeLastOffset { cincludeLastOffset = int32(1) }
lib.Atg_EmbeddingBagOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmbeddingBagPerSampleWeightsBackward wraps
// lib.Atg_EmbeddingBagPerSampleWeightsBackward and returns one new tensor.
func _EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmbeddingBagPerSampleWeightsBackwardOut is the `out=`-style variant of
// _EmbeddingBagPerSampleWeightsBackward; the result is computed into `out`.
func _EmbeddingBagPerSampleWeightsBackwardOut(out *Tensor, grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EmbeddingBagPerSampleWeightsBackwardOut(ptr, out.ctensor, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmbeddingBagSparseBackward wraps lib.Atg_EmbeddingBagSparseBackward and
// returns the single result tensor, or the error reported by TorchErr.
func _EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmptyAffineQuantized wraps lib.Atg_EmptyAffineQuantized, creating a new
// tensor of the given size, dtype (optionsKind) and device, with the given
// quantization scale/zeroPoint passed through to the C side.
func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Slices cross the C boundary as (data, length) pairs.
sizeLen := len(size)
lib.Atg_EmptyAffineQuantized(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmptyAffineQuantizedOut is the `out=`-style variant of
// _EmptyAffineQuantized; it writes into the provided `out` tensor.
func _EmptyAffineQuantizedOut(out *Tensor, size []int64, scale float64, zeroPoint int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_EmptyAffineQuantizedOut(ptr, out.ctensor, size, sizeLen, scale, zeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmptyPerChannelAffineQuantized wraps lib.Atg_EmptyPerChannelAffineQuantized;
// per-channel quantization parameters are supplied as tensors (scales,
// zeroPoints) along the given axis.
func _EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, sizeLen, scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EmptyPerChannelAffineQuantizedOut is the `out=`-style variant of
// _EmptyPerChannelAffineQuantized.
func _EmptyPerChannelAffineQuantizedOut(out *Tensor, size []int64, scales *Tensor, zeroPoints *Tensor, axis int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_EmptyPerChannelAffineQuantizedOut(ptr, out.ctensor, size, sizeLen, scales.ctensor, zeroPoints.ctensor, axis)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EuclideanDist wraps lib.Atg_EuclideanDist over tensors x1 and x2 and
// returns the single result tensor, or the error reported by TorchErr.
func _EuclideanDist(x1 *Tensor, x2 *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EuclideanDist(ptr, x1.ctensor, x2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _EuclideanDistOut is the `out=`-style variant of _EuclideanDist.
func _EuclideanDistOut(out *Tensor, x1 *Tensor, x2 *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EuclideanDistOut(ptr, out.ctensor, x1.ctensor, x2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FakeQuantizeLearnablePerChannelAffine wraps
// lib.Atg_FakeQuantizeLearnablePerChannelAffine on the receiver tensor.
// If del is true the receiver is dropped (freed) after the call returns.
func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FakeQuantizeLearnablePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FakeQuantizeLearnablePerChannelAffineBackward wraps
// lib.Atg_FakeQuantizeLearnablePerChannelAffineBackward and returns three
// result tensors. If del is true the receiver is dropped after the call.
func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): three handles written into consecutive slots after a
// C.malloc(0) — allocation looks undersized; confirm against the generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FakeQuantizeLearnablePerChannelAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FakeQuantizeLearnablePerChannelAffineOut is the `out=`-style variant of
// _FakeQuantizeLearnablePerChannelAffine. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffineOut(out *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FakeQuantizeLearnablePerChannelAffineOut(ptr, out.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FakeQuantizeLearnablePerTensorAffine wraps
// lib.Atg_FakeQuantizeLearnablePerTensorAffine on the receiver tensor.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FakeQuantizeLearnablePerTensorAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FakeQuantizeLearnablePerTensorAffineBackward wraps
// lib.Atg_FakeQuantizeLearnablePerTensorAffineBackward and returns three
// result tensors. If del is true the receiver is dropped after the call.
func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): three handles written into consecutive slots after a
// C.malloc(0) — allocation looks undersized; confirm against the generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FakeQuantizeLearnablePerTensorAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FakeQuantizeLearnablePerTensorAffineOut is the `out=`-style variant of
// _FakeQuantizeLearnablePerTensorAffine. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffineOut(out *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FakeQuantizeLearnablePerTensorAffineOut(ptr, out.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FakeQuantizePerTensorAffineCachemaskTensorQparams wraps
// lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparams and returns two
// result tensors. If del is true the receiver is dropped after the call.
func(ts *Tensor) _FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): two handles written past a C.malloc(0) allocation —
// looks undersized; confirm against the binding generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparams(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, fakeQuantEnabled.ctensor, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FakeQuantizePerTensorAffineCachemaskTensorQparamsOut is the `out=`-style
// variant of _FakeQuantizePerTensorAffineCachemaskTensorQparams; two result
// tensors are returned. If del is true the receiver is dropped after the call.
func(ts *Tensor) _FakeQuantizePerTensorAffineCachemaskTensorQparamsOut(out0 *Tensor, out1 *Tensor, scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparamsOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, fakeQuantEnabled.ctensor, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FftC2c wraps lib.Atg_FftC2c on the receiver tensor over the given dims
// (presumably the complex-to-complex FFT kernel — name-based, confirm in
// libtorch docs). If del is true the receiver is dropped after the call.
func(ts *Tensor) _FftC2c(dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
cforward := int32(0)
if forward { cforward = int32(1) }
lib.Atg_FftC2c(ptr, ts.ctensor, dim, dimLen, normalization, cforward)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FftC2cOut is the `out=`-style variant of _FftC2c. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) _FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
cforward := int32(0)
if forward { cforward = int32(1) }
lib.Atg_FftC2cOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, normalization, cforward)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FftC2r wraps lib.Atg_FftC2r on the receiver tensor (presumably the
// complex-to-real FFT kernel — name-based, confirm in libtorch docs).
// If del is true the receiver is dropped after the call.
func(ts *Tensor) _FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.Atg_FftC2r(ptr, ts.ctensor, dim, dimLen, normalization, lastDimSize)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FftC2rOut is the `out=`-style variant of _FftC2r. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) _FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.Atg_FftC2rOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, normalization, lastDimSize)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FftR2c wraps lib.Atg_FftR2c on the receiver tensor (presumably the
// real-to-complex FFT kernel — name-based, confirm in libtorch docs).
// If del is true the receiver is dropped after the call.
func(ts *Tensor) _FftR2c(dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
conesided := int32(0)
if onesided { conesided = int32(1) }
lib.Atg_FftR2c(ptr, ts.ctensor, dim, dimLen, normalization, conesided)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FftR2cOut is the `out=`-style variant of _FftR2c. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) _FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
conesided := int32(0)
if onesided { conesided = int32(1) }
lib.Atg_FftR2cOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, normalization, conesided)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FlashAttentionBackward wraps lib.Atg_FlashAttentionBackward and returns
// three result tensors, or the error reported by TorchErr.
func _FlashAttentionBackward(gradOut *Tensor, query *Tensor, key *Tensor, value *Tensor, out *Tensor, logsumexp *Tensor, cumSeqQ *Tensor, cumSeqK *Tensor, maxQ int64, maxK int64, dropoutP float64, isCausal bool, philoxSeed int64, philoxOffset int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// NOTE(review): three handles written into consecutive slots after a
// C.malloc(0) — allocation looks undersized; confirm against the generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
lib.Atg_FlashAttentionBackward(ctensorPtr0, gradOut.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cumSeqQ.ctensor, cumSeqK.ctensor, maxQ, maxK, dropoutP, cisCausal, philoxSeed, philoxOffset)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Foobar wraps lib.Atg_Foobar (an internal libtorch test op) on the
// receiver tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) _Foobar(arg1 bool, arg2 bool, arg3 bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bools cross the C boundary as 0/1 int32 flags.
carg1 := int32(0)
if arg1 { carg1 = int32(1) }
carg2 := int32(0)
if arg2 { carg2 = int32(1) }
carg3 := int32(0)
if arg3 { carg3 = int32(1) }
lib.Atg_Foobar(ptr, ts.ctensor, carg1, carg2, carg3)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FoobarOut is the `out=`-style variant of _Foobar. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) _FoobarOut(out *Tensor, arg1 bool, arg2 bool, arg3 bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
carg1 := int32(0)
if arg1 { carg1 = int32(1) }
carg2 := int32(0)
if arg2 { carg2 = int32(1) }
carg3 := int32(0)
if arg3 { carg3 = int32(1) }
lib.Atg_FoobarOut(ptr, out.ctensor, ts.ctensor, carg1, carg2, carg3)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FusedDropout wraps lib.Atg_FusedDropout on the receiver tensor with
// dropout probability p and returns two result tensors. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) _FusedDropout(p float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): two handles written past a C.malloc(0) allocation —
// looks undersized; confirm against the binding generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FusedDropout(ctensorPtr0, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FusedDropoutOut is the `out=`-style variant of _FusedDropout; two result
// tensors are returned. If del is true the receiver is dropped after the call.
func(ts *Tensor) _FusedDropoutOut(out0 *Tensor, out1 *Tensor, p float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FusedDropoutOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FusedMovingAvgObsFqHelper wraps lib.Atg_FusedMovingAvgObsFqHelper and
// returns two result tensors. If del is true the receiver is dropped after
// the call.
func(ts *Tensor) _FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cperRowFakeQuant := int32(0)
if perRowFakeQuant { cperRowFakeQuant = int32(1) }
csymmetricQuant := int32(0)
if symmetricQuant { csymmetricQuant = int32(1) }
lib.Atg_FusedMovingAvgObsFqHelper(ctensorPtr0, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FusedMovingAvgObsFqHelperFunctional wraps
// lib.Atg_FusedMovingAvgObsFqHelperFunctional and returns six result tensors.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) _FusedMovingAvgObsFqHelperFunctional(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): six handles written into consecutive slots after a
// C.malloc(0) — allocation looks undersized; confirm against the generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr5 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr4)) + unsafe.Sizeof(ctensorPtr0)))
cperRowFakeQuant := int32(0)
if perRowFakeQuant { cperRowFakeQuant = int32(1) }
csymmetricQuant := int32(0)
if symmetricQuant { csymmetricQuant = int32(1) }
lib.Atg_FusedMovingAvgObsFqHelperFunctional(ctensorPtr0, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
retVal5 = &Tensor{ctensor: *ctensorPtr5}
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _FusedMovingAvgObsFqHelperOut is the `out=`-style variant of
// _FusedMovingAvgObsFqHelper; two result tensors are returned. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) _FusedMovingAvgObsFqHelperOut(out0 *Tensor, out1 *Tensor, observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cperRowFakeQuant := int32(0)
if perRowFakeQuant { cperRowFakeQuant = int32(1) }
csymmetricQuant := int32(0)
if symmetricQuant { csymmetricQuant = int32(1) }
lib.Atg_FusedMovingAvgObsFqHelperOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `int64`:
// --------------------------
// _FusedSdpChoice wraps lib.Atg_FusedSdpChoice and returns the int64 value
// produced by the C side (no tensor is allocated), or the error reported
// by TorchErr.
func _FusedSdpChoice(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor, dropoutP float64, isCausal bool)(retVal int64, err error) {
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
retVal = lib.Atg_FusedSdpChoice(query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cisCausal)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FwPrimal wraps lib.Atg_FwPrimal on the receiver tensor at the given
// forward-AD level. If del is true the receiver is dropped after the call.
func(ts *Tensor) _FwPrimal(level int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FwPrimal(ptr, ts.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FwPrimalCopy wraps lib.Atg_FwPrimalCopy (the copying variant of
// _FwPrimal). If del is true the receiver is dropped after the call.
func(ts *Tensor) _FwPrimalCopy(level int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FwPrimalCopy(ptr, ts.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _FwPrimalCopyOut is the `out=`-style variant of _FwPrimalCopy. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) _FwPrimalCopyOut(out *Tensor, level int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FwPrimalCopyOut(ptr, out.ctensor, ts.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _GatherSparseBackward wraps lib.Atg_GatherSparseBackward on the receiver
// tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _GridSampler2dCpuFallback wraps lib.Atg_GridSampler2dCpuFallback and
// returns the single result tensor, or the error reported by TorchErr.
func _GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.Atg_GridSampler2dCpuFallback(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _GridSampler2dCpuFallbackBackward wraps
// lib.Atg_GridSampler2dCpuFallbackBackward and returns two result tensors.
func _GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// NOTE(review): two handles written past a C.malloc(0) allocation —
// looks undersized; confirm against the binding generator.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.Atg_GridSampler2dCpuFallbackBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _GridSampler2dCpuFallbackOut is the `out=`-style variant of
// _GridSampler2dCpuFallback.
func _GridSampler2dCpuFallbackOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.Atg_GridSampler2dCpuFallbackOut(ptr, out.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// _HasCompatibleShallowCopyType wraps lib.Atg_HasCompatibleShallowCopyType
// and returns its bool result (no tensor is allocated). If del is true the
// receiver is dropped after the call.
func(ts *Tensor) _HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.Atg_HasCompatibleShallowCopyType(ts.ctensor, from.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// _HasSameStorageNumel wraps lib.Atg_HasSameStorageNumel and returns its
// bool result (no tensor is allocated). If del is true the receiver is
// dropped after the call.
func(ts *Tensor) _HasSameStorageNumel(other *Tensor, del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.Atg_HasSameStorageNumel(ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _HistogramddFromBinCts is the generated CGo binding for
// `atg__histogramdd_from_bin_cts` (out variant); it returns a new tensor
// produced by the C call. When del is true, ts is dropped after the call.
func(ts *Tensor) _HistogramddFromBinCts(out *Tensor, bins []int64, rangeVals []float64, weight *Tensor, density bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Slices are passed as (data, length) pairs to the C side.
binsLen := len(bins)
rangeValsLen := len(rangeVals)
// Go bool -> C int flag (0/1).
cdensity := int32(0)
if density { cdensity = int32(1) }
lib.Atg_HistogramddFromBinCts(ptr, out.ctensor, ts.ctensor, bins, binsLen, rangeVals, rangeValsLen, weight.ctensor, cdensity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _HistogramddFromBinTensors is the generated CGo binding for
// `atg__histogramdd_from_bin_tensors`; bins is passed as a C tensor array.
// When del is true, ts is dropped after the call.
func(ts *Tensor) _HistogramddFromBinTensors(bins []*Tensor, weight *Tensor, density bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Flatten []*Tensor into the raw C handles the FFI expects.
var cbins []lib.Ctensor
for _, t := range bins {cbins = append(cbins, t.ctensor)}
// Go bool -> C int flag (0/1).
cdensity := int32(0)
if density { cdensity = int32(1) }
lib.Atg_HistogramddFromBinTensors(ptr, ts.ctensor, cbins, len(cbins), weight.ctensor, cdensity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _HistogramddFromBinTensorsOut is the `out=` variant of
// _HistogramddFromBinTensors: the C side writes into `out` and the new handle
// is also returned. When del is true, ts is dropped after the call.
func(ts *Tensor) _HistogramddFromBinTensorsOut(out *Tensor, bins []*Tensor, weight *Tensor, density bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Flatten []*Tensor into the raw C handles the FFI expects.
var cbins []lib.Ctensor
for _, t := range bins {cbins = append(cbins, t.ctensor)}
// Go bool -> C int flag (0/1).
cdensity := int32(0)
if density { cdensity = int32(1) }
lib.Atg_HistogramddFromBinTensorsOut(ptr, out.ctensor, ts.ctensor, cbins, len(cbins), weight.ctensor, cdensity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _IndexPutImpl is the generated CGo binding for `atg__index_put_impl_`.
// indices is passed as a C tensor array; accumulate/unsafety are forwarded as
// 0/1 flags. When del is true, ts is dropped after the call.
func(ts *Tensor) _IndexPutImpl(indices []*Tensor, values *Tensor, accumulate bool, unsafety bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Flatten []*Tensor into the raw C handles the FFI expects.
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
cunsafety := int32(0)
if unsafety { cunsafety = int32(1) }
lib.Atg_IndexPutImpl(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate, cunsafety)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _IndexPutImplOut is the `out=` variant of _IndexPutImpl: the C side writes
// into `out` and the new handle is also returned. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _IndexPutImplOut(out *Tensor, indices []*Tensor, values *Tensor, accumulate bool, unsafety bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Flatten []*Tensor into the raw C handles the FFI expects.
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
cunsafety := int32(0)
if unsafety { cunsafety = int32(1) }
lib.Atg_IndexPutImplOut(ptr, out.ctensor, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate, cunsafety)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Indices is the generated CGo binding for `atg__indices`; it returns a new
// tensor produced by the C call. When del is true, ts is dropped after the
// call.
func(ts *Tensor) _Indices(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Indices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _IndicesCopy is the generated CGo binding for `atg__indices_copy`; it
// returns a new tensor produced by the C call. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _IndicesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_IndicesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _IndicesCopyOut is the `out=` variant of _IndicesCopy: the C side writes
// into `out` and the new handle is also returned. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _IndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_IndicesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _IsAllTrue is the generated CGo binding for `atg__is_all_true`; the result
// is returned as a tensor handle, not a Go bool. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _IsAllTrue(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_IsAllTrue(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _IsAnyTrue is the generated CGo binding for `atg__is_any_true`; the result
// is returned as a tensor handle, not a Go bool. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _IsAnyTrue(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_IsAnyTrue(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// _IsZerotensor returns the boolean result of the C call `Atg_IsZerotensor`
// for ts. When del is true, ts is dropped after the call returns.
func(ts *Tensor) _IsZerotensor(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.Atg_IsZerotensor(ts.ctensor)
// Errors from the C side are surfaced via the global TorchErr check.
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgDet is the generated CGo binding for `atg__linalg_det`; the C call
// produces three tensors (presumably det result plus LU/pivots — TODO confirm
// against libtorch docs).
func _LinalgDet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// The C call writes three consecutive Ctensor handles starting at
// ctensorPtr0. NOTE(review): the base buffer is C.malloc(0) — relies on the
// callee's multi-return write convention (generated-code pattern); confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_LinalgDet(ctensorPtr0, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgDetResult is the `out=` variant of _LinalgDet: result/lU/pivots are
// the destination tensors the C side writes into; three new handles are also
// returned.
func _LinalgDetResult(result *Tensor, lU *Tensor, pivots *Tensor, a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// Three consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_LinalgDetResult(ctensorPtr0, result.ctensor, lU.ctensor, pivots.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgEigh is the generated CGo binding for `atg__linalg_eigh`; uPLO is
// forwarded as a string and computeV as a 0/1 flag. Two tensors are returned
// (presumably eigenvalues and eigenvectors — TODO confirm).
func _LinalgEigh(a *Tensor, uPLO string, computeV bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// Two consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag (0/1).
ccomputeV := int32(0)
if computeV { ccomputeV = int32(1) }
lib.Atg_LinalgEigh(ctensorPtr0, a.ctensor, uPLO, ccomputeV)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgEighEigenvalues is the `out=` variant of _LinalgEigh:
// eigenvalues/eigenvectors are the destination tensors the C side writes
// into; two new handles are also returned.
func _LinalgEighEigenvalues(eigenvalues *Tensor, eigenvectors *Tensor, a *Tensor, uPLO string, computeV bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// Two consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag (0/1).
ccomputeV := int32(0)
if computeV { ccomputeV = int32(1) }
lib.Atg_LinalgEighEigenvalues(ctensorPtr0, eigenvalues.ctensor, eigenvectors.ctensor, a.ctensor, uPLO, ccomputeV)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgSlogdet is the generated CGo binding for `atg__linalg_slogdet`; the
// C call produces four tensors (presumably sign, logabsdet, LU, pivots —
// TODO confirm against libtorch docs).
func _LinalgSlogdet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// Four consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_LinalgSlogdet(ctensorPtr0, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgSlogdetSign is the `out=` variant of _LinalgSlogdet:
// sign/logabsdet/lU/pivots are the destination tensors the C side writes
// into; four new handles are also returned.
func _LinalgSlogdetSign(sign *Tensor, logabsdet *Tensor, lU *Tensor, pivots *Tensor, a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// Four consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_LinalgSlogdetSign(ctensorPtr0, sign.ctensor, logabsdet.ctensor, lU.ctensor, pivots.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgSolveEx is the generated CGo binding for `atg__linalg_solve_ex`;
// left/checkErrors are forwarded as 0/1 flags and four tensors are returned
// (presumably result, LU, pivots, info — TODO confirm against libtorch docs).
func _LinalgSolveEx(a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// Four consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flags (0/1).
cleft := int32(0)
if left { cleft = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.Atg_LinalgSolveEx(ctensorPtr0, a.ctensor, b.ctensor, cleft, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgSolveExResult is the `out=` variant of _LinalgSolveEx:
// result/lU/pivots/info are the destination tensors the C side writes into;
// four new handles are also returned.
func _LinalgSolveExResult(result *Tensor, lU *Tensor, pivots *Tensor, info *Tensor, a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// Four consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flags (0/1).
cleft := int32(0)
if left { cleft = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.Atg_LinalgSolveExResult(ctensorPtr0, result.ctensor, lU.ctensor, pivots.ctensor, info.ctensor, a.ctensor, b.ctensor, cleft, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgSvd is the generated CGo binding for `atg__linalg_svd`;
// fullMatrices/computeUv are forwarded as 0/1 flags and driver as a string.
// Three tensors are returned (presumably U, S, Vh — TODO confirm).
func _LinalgSvd(a *Tensor, fullMatrices bool, computeUv bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// Three consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flags (0/1).
cfullMatrices := int32(0)
if fullMatrices { cfullMatrices = int32(1) }
ccomputeUv := int32(0)
if computeUv { ccomputeUv = int32(1) }
lib.Atg_LinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices, ccomputeUv, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LinalgSvdU is the `out=` variant of _LinalgSvd: u/s/vh are the
// destination tensors the C side writes into; three new handles are also
// returned.
func _LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, computeUv bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// Three consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flags (0/1).
cfullMatrices := int32(0)
if fullMatrices { cfullMatrices = int32(1) }
ccomputeUv := int32(0)
if computeUv { ccomputeUv = int32(1) }
lib.Atg_LinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices, ccomputeUv, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _LogSoftmax is the generated CGo binding for `atg__log_softmax` along
// `dim`; halfToFloat is forwarded as a 0/1 flag. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool -> C int flag (0/1).
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _LogSoftmaxBackwardData is the generated CGo binding for
// `atg__log_softmax_backward_data`; inputDtype is passed as its C int code.
func _LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) {
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _LogSoftmaxBackwardDataOut is the `out=` variant of
// _LogSoftmaxBackwardData: the C side writes into `out` and the new handle is
// also returned.
func _LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) {
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_LogSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _LogSoftmaxOut is the `out=` variant of _LogSoftmax: the C side writes into
// `out` and the new handle is also returned. When del is true, ts is dropped
// after the call.
func(ts *Tensor) _LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool -> C int flag (0/1).
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_LogSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Logcumsumexp is the generated CGo binding for `atg__logcumsumexp` along
// `dim`. When del is true, ts is dropped after the call.
func(ts *Tensor) _Logcumsumexp(dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Logcumsumexp(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _LogcumsumexpOut is the `out=` variant of _Logcumsumexp: the C side writes
// into `out` and the new handle is also returned. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_LogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LstmMps is the generated CGo binding for `atg__lstm_mps`. hx and params
// are passed as C tensor arrays; the four bool options are forwarded as 0/1
// flags. Six tensors are returned by the C call.
func _LstmMps(input *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor, err error) {
// Six consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr5 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr4)) + unsafe.Sizeof(ctensorPtr0)))
// Flatten []*Tensor into the raw C handles the FFI expects.
var chx []lib.Ctensor
for _, t := range hx {chx = append(chx, t.ctensor)}
var cparams []lib.Ctensor
for _, t := range params {cparams = append(cparams, t.ctensor)}
// Go bools -> C int flags (0/1).
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.Atg_LstmMps(ctensorPtr0, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
retVal5 = &Tensor{ctensor: *ctensorPtr5}
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LstmMpsOut is the `out=` variant of _LstmMps: out0..out5 are the
// destination tensors the C side writes into; six new handles are also
// returned.
func _LstmMpsOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, out5 *Tensor, input *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor, err error) {
// Six consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr5 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr4)) + unsafe.Sizeof(ctensorPtr0)))
// Flatten []*Tensor into the raw C handles the FFI expects.
var chx []lib.Ctensor
for _, t := range hx {chx = append(chx, t.ctensor)}
var cparams []lib.Ctensor
for _, t := range params {cparams = append(cparams, t.ctensor)}
// Go bools -> C int flags (0/1).
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.Atg_LstmMpsOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, out5.ctensor, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
retVal5 = &Tensor{ctensor: *ctensorPtr5}
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _LuWithInfo is the generated CGo binding for `atg__lu_with_info`;
// pivot/checkErrors are forwarded as 0/1 flags and three tensors are returned
// (presumably LU factorization, pivots, info — TODO confirm). When del is
// true, ts is dropped after the call.
func(ts *Tensor) _LuWithInfo(pivot bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three consecutive Ctensor return slots after ctensorPtr0. NOTE(review):
// base buffer is C.malloc(0) — generated-code write convention; confirm.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools -> C int flags (0/1).
cpivot := int32(0)
if pivot { cpivot = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.Atg_LuWithInfo(ctensorPtr0, ts.ctensor, cpivot, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MakeDual is the generated CGo binding for `atg__make_dual` over primal,
// tangent and the autodiff `level`; it returns a new tensor handle.
func _MakeDual(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor, err error) {
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakeDual(ptr, primal.ctensor, tangent.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MakeDualCopy is the generated CGo binding for `atg__make_dual_copy`; it
// returns a new tensor handle produced by the C call.
func _MakeDualCopy(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor, err error) {
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakeDualCopy(ptr, primal.ctensor, tangent.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MakeDualCopyOut is the `out=` variant of _MakeDualCopy: the C side writes
// into `out` and the new handle is also returned.
func _MakeDualCopyOut(out *Tensor, primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor, err error) {
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakeDualCopyOut(ptr, out.ctensor, primal.ctensor, tangent.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MakePerChannelQuantizedTensor is the generated CGo binding for
// `atg__make_per_channel_quantized_tensor` with per-channel scale/zeroPoint
// tensors and quantization axis. When del is true, ts is dropped after the
// call.
func(ts *Tensor) _MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MakePerChannelQuantizedTensorOut is the `out=` variant of
// _MakePerChannelQuantizedTensor: the C side writes into `out` and the new
// handle is also returned. When del is true, ts is dropped after the call.
func(ts *Tensor) _MakePerChannelQuantizedTensorOut(out *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakePerChannelQuantizedTensorOut(ptr, out.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MakePerTensorQuantizedTensor is the generated CGo binding for
// `atg__make_per_tensor_quantized_tensor` with scalar scale/zeroPoint. When
// del is true, ts is dropped after the call.
func(ts *Tensor) _MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MakePerTensorQuantizedTensorOut is the `out=` variant of
// _MakePerTensorQuantizedTensor: the C side writes into `out` and the new
// handle is also returned. When del is true, ts is dropped after the call.
func(ts *Tensor) _MakePerTensorQuantizedTensorOut(out *Tensor, scale float64, zeroPoint int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakePerTensorQuantizedTensorOut(ptr, out.ctensor, ts.ctensor, scale, zeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MaskedScale is the generated CGo binding for `atg__masked_scale` with a
// mask tensor and scalar scale. When del is true, ts is dropped after the
// call.
func(ts *Tensor) _MaskedScale(mask *Tensor, scale float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MaskedScaleOut is the `out=` variant of _MaskedScale: the C side writes
// into `out` and the new handle is also returned. When del is true, ts is
// dropped after the call.
func(ts *Tensor) _MaskedScaleOut(out *Tensor, mask *Tensor, scale float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MaskedScaleOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, scale)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MaskedSoftmax is the generated CGo binding for `atg__masked_softmax`.
// dim and maskType are optional int64s encoded as slices: an empty slice
// means "null" (the *Null flag is 1 and the value ignored); otherwise
// element 0 is the value. When del is true, ts is dropped after the call.
func(ts *Tensor) _MaskedSoftmax(mask *Tensor, dim []int64, maskType []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Optional dim: (value, isNull) pair for the C side.
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
// Optional maskType: (value, isNull) pair for the C side.
var cmaskTypeVal int64 = 0
var cmaskTypeNull int = 1
if len(maskType) > 0 {
cmaskTypeVal = maskType[0]
cmaskTypeNull = 0
}
lib.Atg_MaskedSoftmax(ptr, ts.ctensor, mask.ctensor, cdimVal, cdimNull, cmaskTypeVal, cmaskTypeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MaskedSoftmaxBackward is the generated CGo binding for
// `atg__masked_softmax_backward`. dim is an optional int64 encoded as a
// slice: empty means "null", otherwise element 0 is the value.
func _MaskedSoftmaxBackward(gradOutput *Tensor, output *Tensor, mask *Tensor, dim []int64)(retVal *Tensor, err error) {
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Optional dim: (value, isNull) pair for the C side.
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.Atg_MaskedSoftmaxBackward(ptr, gradOutput.ctensor, output.ctensor, mask.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MaskedSoftmaxBackwardOut is the `out=` variant of _MaskedSoftmaxBackward:
// the C side writes into `out` and the new handle is also returned. dim is
// an optional int64 encoded as a slice (empty == null).
func _MaskedSoftmaxBackwardOut(out *Tensor, gradOutput *Tensor, output *Tensor, mask *Tensor, dim []int64)(retVal *Tensor, err error) {
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Optional dim: (value, isNull) pair for the C side.
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.Atg_MaskedSoftmaxBackwardOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, mask.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MaskedSoftmaxOut is the `out=` variant of _MaskedSoftmax: the C side
// writes into `out` and the new handle is also returned. dim and maskType
// are optional int64s encoded as slices (empty == null). When del is true,
// ts is dropped after the call.
func(ts *Tensor) _MaskedSoftmaxOut(out *Tensor, mask *Tensor, dim []int64, maskType []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Optional dim: (value, isNull) pair for the C side.
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
// Optional maskType: (value, isNull) pair for the C side.
var cmaskTypeVal int64 = 0
var cmaskTypeNull int = 1
if len(maskType) > 0 {
cmaskTypeVal = maskType[0]
cmaskTypeNull = 0
}
lib.Atg_MaskedSoftmaxOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, cdimVal, cdimNull, cmaskTypeVal, cmaskTypeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MkldnnReshape is the generated CGo binding for `atg__mkldnn_reshape`;
// shape is passed as a (data, length) pair. When del is true, ts is dropped
// after the call.
func(ts *Tensor) _MkldnnReshape(shape []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
shapeLen := len(shape)
lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, shapeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MkldnnReshapeOut is the `out=` variant of _MkldnnReshape: the C side
// writes into `out` and the new handle is also returned. When del is true,
// ts is dropped after the call.
func(ts *Tensor) _MkldnnReshapeOut(out *Tensor, shape []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
shapeLen := len(shape)
lib.Atg_MkldnnReshapeOut(ptr, out.ctensor, ts.ctensor, shape, shapeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MkldnnTranspose is the generated CGo binding for `atg__mkldnn_transpose`
// over dims dim0/dim1; it returns a new tensor handle. When del is true, ts
// is dropped after the call.
func(ts *Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MkldnnTranspose_ is the in-place variant (trailing underscore): the
// C call's result handle replaces ts.ctensor instead of being returned as a
// new *Tensor.
func(ts *Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64)(err error) {
// Slot the C call writes the resulting tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return err
}
// Rebind the receiver to the handle written by the C side.
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _MkldnnTransposeOut is the `out=` variant of _MkldnnTranspose: the C side
// writes into `out` and the new handle is also returned. When del is true,
// ts is dropped after the call.
func(ts *Tensor) _MkldnnTransposeOut(out *Tensor, dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MkldnnTransposeOut(ptr, out.ctensor, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MpsConvolution is the generated CGo binding for `atg__mps_convolution`;
// padding/stride/dilation are passed as (data, length) pairs. When del is
// true, ts is dropped after the call.
func(ts *Tensor) _MpsConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.Atg_MpsConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _MpsConvolutionOut is the `out=` variant of _MpsConvolution: the C side
// writes into `out` and the new handle is also returned. When del is true,
// ts is dropped after the call.
func(ts *Tensor) _MpsConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C call writes the returned tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.Atg_MpsConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _MpsConvolutionTranspose calls lib.Atg_MpsConvolutionTranspose with the receiver as
// input and wraps the returned handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _MpsConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.Atg_MpsConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _MpsConvolutionTransposeOut is the out-variant of _MpsConvolutionTranspose:
// lib.Atg_MpsConvolutionTransposeOut stores into `out`; del drops the receiver.
func(ts *Tensor) _MpsConvolutionTransposeOut(out *Tensor, weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.Atg_MpsConvolutionTransposeOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeBatchNormLegit calls lib.Atg_NativeBatchNormLegit; the C side is expected
// to write 3 consecutive Ctensor handles starting at ctensorPtr0, which are wrapped
// into retVal0..retVal2. The bool `training` is lowered to an int32 flag for C.
func _NativeBatchNormLegit(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 3 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.Atg_NativeBatchNormLegit(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeBatchNormLegitFunctional calls lib.Atg_NativeBatchNormLegitFunctional; the
// C side is expected to write 5 consecutive Ctensor handles starting at ctensorPtr0,
// which are wrapped into retVal0..retVal4.
func _NativeBatchNormLegitFunctional(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 5 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.Atg_NativeBatchNormLegitFunctional(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeBatchNormLegitNoStats calls lib.Atg_NativeBatchNormLegitNoStats; the C side
// is expected to write 3 consecutive Ctensor handles starting at ctensorPtr0.
func _NativeBatchNormLegitNoStats(input *Tensor, weight *Tensor, bias *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 3 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.Atg_NativeBatchNormLegitNoStats(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeBatchNormLegitNoStatsOut is the out-variant of _NativeBatchNormLegitNoStats:
// results go into out/saveMean/saveInvstd and 3 handles are written from ctensorPtr0.
func _NativeBatchNormLegitNoStatsOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 3 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.Atg_NativeBatchNormLegitNoStatsOut(ctensorPtr0, out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeBatchNormLegitOut is the out-variant of _NativeBatchNormLegit: results go
// into out/saveMean/saveInvstd and 3 handles are written from ctensorPtr0.
func _NativeBatchNormLegitOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 3 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.Atg_NativeBatchNormLegitOut(ctensorPtr0, out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeDecoderOnlyMultiHeadAttention calls lib.Atg_NativeDecoderOnlyMultiHeadAttention;
// the C side is expected to write 4 consecutive Ctensor handles starting at ctensorPtr0.
// The bool flags are lowered to int32 for C.
func _NativeDecoderOnlyMultiHeadAttention(query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor, incrKey *Tensor, incrValue *Tensor, needWeights bool, averageAttnWeights bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 4 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cneedWeights := int32(0)
if needWeights { cneedWeights = int32(1) }
caverageAttnWeights := int32(0)
if averageAttnWeights { caverageAttnWeights = int32(1) }
lib.Atg_NativeDecoderOnlyMultiHeadAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor, cneedWeights, caverageAttnWeights)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeDecoderOnlyMultiHeadAttentionOut is the out-variant of
// _NativeDecoderOnlyMultiHeadAttention: results go into out0..out3 and 4 handles are
// written from ctensorPtr0.
func _NativeDecoderOnlyMultiHeadAttentionOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor, incrKey *Tensor, incrValue *Tensor, needWeights bool, averageAttnWeights bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 4 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cneedWeights := int32(0)
if needWeights { cneedWeights = int32(1) }
caverageAttnWeights := int32(0)
if averageAttnWeights { caverageAttnWeights = int32(1) }
lib.Atg_NativeDecoderOnlyMultiHeadAttentionOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor, cneedWeights, caverageAttnWeights)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeMultiHeadAttention calls lib.Atg_NativeMultiHeadAttention; the C side is
// expected to write 2 consecutive Ctensor handles starting at ctensorPtr0.
// maskType is an optional int64: an empty slice encodes the "null" value via the
// (cmaskTypeVal, cmaskTypeNull) pair.
func _NativeMultiHeadAttention(query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor, needWeights bool, averageAttnWeights bool, maskType []int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 2 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cneedWeights := int32(0)
if needWeights { cneedWeights = int32(1) }
caverageAttnWeights := int32(0)
if averageAttnWeights { caverageAttnWeights = int32(1) }
var cmaskTypeVal int64 = 0
var cmaskTypeNull int = 1
if len(maskType) > 0 {
cmaskTypeVal = maskType[0]
cmaskTypeNull = 0
}
lib.Atg_NativeMultiHeadAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, cneedWeights, caverageAttnWeights, cmaskTypeVal, cmaskTypeNull)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _NativeMultiHeadAttentionOut is the out-variant of _NativeMultiHeadAttention:
// results go into out0/out1 and 2 handles are written from ctensorPtr0.
// An empty maskType slice encodes the "null" optional value.
func _NativeMultiHeadAttentionOut(out0 *Tensor, out1 *Tensor, query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor, needWeights bool, averageAttnWeights bool, maskType []int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 2 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cneedWeights := int32(0)
if needWeights { cneedWeights = int32(1) }
caverageAttnWeights := int32(0)
if averageAttnWeights { caverageAttnWeights = int32(1) }
var cmaskTypeVal int64 = 0
var cmaskTypeNull int = 1
if len(maskType) > 0 {
cmaskTypeVal = maskType[0]
cmaskTypeNull = 0
}
lib.Atg_NativeMultiHeadAttentionOut(ctensorPtr0, out0.ctensor, out1.ctensor, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, cneedWeights, caverageAttnWeights, cmaskTypeVal, cmaskTypeNull)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NegView calls lib.Atg_NegView on the receiver and wraps the returned handle in a
// new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _NegView(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NegView(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NegViewCopy calls lib.Atg_NegViewCopy on the receiver and wraps the returned
// handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _NegViewCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NegViewCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NegViewCopyOut is the out-variant of _NegViewCopy: lib.Atg_NegViewCopyOut stores
// into `out`; del drops the receiver afterwards.
func(ts *Tensor) _NegViewCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NegViewCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedFromPadded calls lib.Atg_NestedFromPadded and wraps the returned handle in
// a new Tensor. The bool flag is lowered to an int32 for C.
func _NestedFromPadded(padded *Tensor, cpuNestedShapeExample *Tensor, fuseTransform0213 bool)(retVal *Tensor, err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cfuseTransform0213 := int32(0)
if fuseTransform0213 { cfuseTransform0213 = int32(1) }
lib.Atg_NestedFromPadded(ptr, padded.ctensor, cpuNestedShapeExample.ctensor, cfuseTransform0213)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedFromPaddedAndNestedExample calls lib.Atg_NestedFromPaddedAndNestedExample
// and wraps the returned handle in a new Tensor.
func _NestedFromPaddedAndNestedExample(padded *Tensor, ntExample *Tensor)(retVal *Tensor, err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NestedFromPaddedAndNestedExample(ptr, padded.ctensor, ntExample.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedFromPaddedAndNestedExampleOut is the out-variant of
// _NestedFromPaddedAndNestedExample: the C call stores into `out`.
func _NestedFromPaddedAndNestedExampleOut(out *Tensor, padded *Tensor, ntExample *Tensor)(retVal *Tensor, err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NestedFromPaddedAndNestedExampleOut(ptr, out.ctensor, padded.ctensor, ntExample.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedFromPaddedOut is the out-variant of _NestedFromPadded: the C call stores
// into `out`; the bool flag is lowered to an int32 for C.
func _NestedFromPaddedOut(out *Tensor, padded *Tensor, cpuNestedShapeExample *Tensor, fuseTransform0213 bool)(retVal *Tensor, err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cfuseTransform0213 := int32(0)
if fuseTransform0213 { cfuseTransform0213 = int32(1) }
lib.Atg_NestedFromPaddedOut(ptr, out.ctensor, padded.ctensor, cpuNestedShapeExample.ctensor, cfuseTransform0213)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedSelectBackward calls lib.Atg_NestedSelectBackward (gradOutput first, then
// the receiver) and wraps the returned handle in a new Tensor; del drops the receiver.
func(ts *Tensor) _NestedSelectBackward(gradOutput *Tensor, dim int64, index int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NestedSelectBackward(ptr, gradOutput.ctensor, ts.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedSumBackward calls lib.Atg_NestedSumBackward (grad first, then the receiver);
// keepdim is lowered to an int32 flag; del drops the receiver afterwards.
func(ts *Tensor) _NestedSumBackward(grad *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.Atg_NestedSumBackward(ptr, grad.ctensor, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedViewFromBuffer calls lib.Atg_NestedViewFromBuffer with the receiver as the
// buffer and wraps the returned handle in a new Tensor; del drops the receiver.
func(ts *Tensor) _NestedViewFromBuffer(nestedSize *Tensor, nestedStrides *Tensor, offsets []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
offsetsLen := len(offsets)
lib.Atg_NestedViewFromBuffer(ptr, ts.ctensor, nestedSize.ctensor, nestedStrides.ctensor, offsets, offsetsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedViewFromBufferCopy calls lib.Atg_NestedViewFromBufferCopy with the receiver
// as the buffer and wraps the returned handle in a new Tensor; del drops the receiver.
func(ts *Tensor) _NestedViewFromBufferCopy(nestedSize *Tensor, nestedStrides *Tensor, offsets []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
offsetsLen := len(offsets)
lib.Atg_NestedViewFromBufferCopy(ptr, ts.ctensor, nestedSize.ctensor, nestedStrides.ctensor, offsets, offsetsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NestedViewFromBufferCopyOut is the out-variant of _NestedViewFromBufferCopy:
// the C call stores into `out`; del drops the receiver afterwards.
func(ts *Tensor) _NestedViewFromBufferCopyOut(out *Tensor, nestedSize *Tensor, nestedStrides *Tensor, offsets []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
offsetsLen := len(offsets)
lib.Atg_NestedViewFromBufferCopyOut(ptr, out.ctensor, ts.ctensor, nestedSize.ctensor, nestedStrides.ctensor, offsets, offsetsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NewZerosWithSameFeatureMeta calls lib.Atg_NewZerosWithSameFeatureMeta on the
// receiver and `other`, wrapping the returned handle in a new Tensor; del drops the receiver.
func(ts *Tensor) _NewZerosWithSameFeatureMeta(other *Tensor, selfNumBatchDims int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NewZerosWithSameFeatureMeta(ptr, ts.ctensor, other.ctensor, selfNumBatchDims)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NewZerosWithSameFeatureMetaOut is the out-variant of _NewZerosWithSameFeatureMeta:
// the C call stores into `out`; del drops the receiver afterwards.
func(ts *Tensor) _NewZerosWithSameFeatureMetaOut(out *Tensor, other *Tensor, selfNumBatchDims int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NewZerosWithSameFeatureMetaOut(ptr, out.ctensor, ts.ctensor, other.ctensor, selfNumBatchDims)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------

// _NnpackAvailable returns the boolean reported by lib.Atg_NnpackAvailable,
// surfacing any pending Torch error via TorchErr.
func _NnpackAvailable()(retVal bool, err error) {
retVal = lib.Atg_NnpackAvailable()
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NnpackSpatialConvolution calls lib.Atg_NnpackSpatialConvolution and wraps the
// returned handle in a new Tensor. Slice arguments are passed as (pointer, length) pairs.
func _NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor, err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _NnpackSpatialConvolutionOut is the out-variant of _NnpackSpatialConvolution:
// lib.Atg_NnpackSpatialConvolutionOut stores into `out`.
func _NnpackSpatialConvolutionOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor, err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
lib.Atg_NnpackSpatialConvolutionOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `int64`:
// --------------------------

// _Nnz returns the int64 produced by lib.Atg_Nnz for the receiver; del drops the
// receiver after the call.
func(ts *Tensor) _Nnz(del bool)(retVal int64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.Atg_Nnz(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _PackPaddedSequence calls lib.Atg_PackPaddedSequence; the C side is expected to
// write 2 consecutive Ctensor handles starting at ctensorPtr0.
func _PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 2 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.Atg_PackPaddedSequence(ctensorPtr0, input.ctensor, lengths.ctensor, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PackPaddedSequenceBackward calls lib.Atg_PackPaddedSequenceBackward and wraps the
// returned handle in a new Tensor; batchFirst is lowered to an int32 flag for C.
func _PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool)(retVal *Tensor, err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputSizeLen := len(inputSize)
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, inputSizeLen, batchSizes.ctensor, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _PackPaddedSequenceOut is the out-variant of _PackPaddedSequence: results go into
// out0/out1 and 2 handles are written from ctensorPtr0.
func _PackPaddedSequenceOut(out0 *Tensor, out1 *Tensor, input *Tensor, lengths *Tensor, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 2 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.Atg_PackPaddedSequenceOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, lengths.ctensor, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PadCircular calls lib.Atg_PadCircular on the receiver with the pad list and wraps
// the returned handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _PadCircular(pad []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
padLen := len(pad)
lib.Atg_PadCircular(ptr, ts.ctensor, pad, padLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PadEnum calls lib.Atg_PadEnum on the receiver; del drops the receiver afterwards.
// value is an optional float64: an empty slice encodes the "null" value via the
// (cvalueVal, cvalueNull) pair.
func(ts *Tensor) _PadEnum(pad []int64, mode int64, value []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
padLen := len(pad)
var cvalueVal float64 = 0.0
var cvalueNull int = 1
if len(value) > 0 {
cvalueVal = value[0]
cvalueNull = 0
}
lib.Atg_PadEnum(ptr, ts.ctensor, pad, padLen, mode, cvalueVal, cvalueNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _PadPackedSequence calls lib.Atg_PadPackedSequence; the C side is expected to
// write 2 consecutive Ctensor handles starting at ctensorPtr0.
func _PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// NOTE(review): only a zero-size C.malloc backs these 2 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.Atg_PadPackedSequence(ctensorPtr0, data.ctensor, batchSizes.ctensor, cbatchFirst, paddingValue.cscalar, totalLength)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PdistBackward calls lib.Atg_PdistBackward (grad first, then the receiver) and
// wraps the returned handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_PdistBackward(ptr, grad.ctensor, ts.ctensor, p, pdist.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PdistBackwardOut is the out-variant of _PdistBackward: lib.Atg_PdistBackwardOut
// stores into `out`; del drops the receiver afterwards.
func(ts *Tensor) _PdistBackwardOut(out *Tensor, grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_PdistBackwardOut(ptr, out.ctensor, grad.ctensor, ts.ctensor, p, pdist.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PinMemory calls lib.Atg_PinMemory on the receiver with the device encoded via
// device.CInt(); del drops the receiver afterwards.
func(ts *Tensor) _PinMemory(device gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_PinMemory(ptr, ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PinMemoryOut is the out-variant of _PinMemory: lib.Atg_PinMemoryOut stores into
// `out`; del drops the receiver afterwards.
func(ts *Tensor) _PinMemoryOut(out *Tensor, device gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_PinMemoryOut(ptr, out.ctensor, ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _PreluKernel calls lib.Atg_PreluKernel on the receiver and weight, wrapping the
// returned handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _PreluKernel(weight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_PreluKernel(ptr, ts.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// _PreluKernelBackward calls lib.Atg_PreluKernelBackward (gradOutput first, then the
// receiver); the C side is expected to write 2 consecutive Ctensor handles starting
// at ctensorPtr0; del drops the receiver afterwards.
func(ts *Tensor) _PreluKernelBackward(gradOutput *Tensor, weight *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): only a zero-size C.malloc backs these 2 result slots; the C side
// writes past it and the buffer is never freed — generator-wide pattern (UB).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_PreluKernelBackward(ctensorPtr0, gradOutput.ctensor, ts.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// _RemoveBatchDim calls lib.Atg_RemoveBatchDim on the receiver and wraps the
// returned handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_RemoveBatchDim(ptr, ts.ctensor, level, batchSize, outDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _ReshapeAlias calls lib.Atg_ReshapeAlias on the receiver with size/stride lists
// and wraps the returned handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _ReshapeAlias(size []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.Atg_ReshapeAlias(ptr, ts.ctensor, size, sizeLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _ReshapeAliasCopy calls lib.Atg_ReshapeAliasCopy on the receiver with size/stride
// lists and wraps the returned handle in a new Tensor; del drops the receiver.
func(ts *Tensor) _ReshapeAliasCopy(size []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.Atg_ReshapeAliasCopy(ptr, ts.ctensor, size, sizeLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _ReshapeAliasCopyOut is the out-variant of _ReshapeAliasCopy: the C call stores
// into `out`; del drops the receiver afterwards.
func(ts *Tensor) _ReshapeAliasCopyOut(out *Tensor, size []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.Atg_ReshapeAliasCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _ReshapeCopy calls lib.Atg_ReshapeCopy on the receiver with the size list and
// wraps the returned handle in a new Tensor; del drops the receiver afterwards.
func(ts *Tensor) _ReshapeCopy(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_ReshapeCopy(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _ReshapeFromTensor calls lib.Atg_ReshapeFromTensor on the receiver with a
// shape-bearing tensor and wraps the returned handle in a new Tensor; del drops the receiver.
func(ts *Tensor) _ReshapeFromTensor(shape *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _ResizeOutput calls lib.Atg_ResizeOutput on the receiver with the size list and a
// device encoded via device.CInt(); del drops the receiver afterwards.
func(ts *Tensor) _ResizeOutput(size []int64, device gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_ResizeOutput(ptr, ts.ctensor, size, sizeLen, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// _ResizeOutput_ is the in-place variant of _ResizeOutput; on success the handle
// written by the C side replaces ts.ctensor.
func(ts *Tensor) _ResizeOutput_(size []int64, device gotch.Device)(err error) {
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_ResizeOutput_(ptr, ts.ctensor, size, sizeLen, device.CInt())
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// _ResizeOutputOut is the out-variant of _ResizeOutput: lib.Atg_ResizeOutputOut
// stores into `out`; del drops the receiver afterwards.
func(ts *Tensor) _ResizeOutputOut(out *Tensor, size []int64, device gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// NOTE(review): zero-size C.malloc is written through by the C call and never freed — generator-wide pattern.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_ResizeOutputOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _RowwisePrune calls Atg_RowwisePrune, which returns two tensors written
// consecutively through ctensorPtr0.
// NOTE(review): only 0 bytes are malloc'd yet two handles are written through
// this region (ctensorPtr1 = ctensorPtr0 + one pointer) — relies on allocator
// slack; this is the generator's file-wide multi-return pattern.
func _RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_RowwisePrune(ctensorPtr0, weight.ctensor, mask.ctensor, compressedIndicesDtype.CInt())
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SampleDirichlet calls the libtch binding Atg_SampleDirichlet on the
// receiver and wraps the returned handle. del=true drops the receiver on
// return.
func(ts *Tensor) _SampleDirichlet(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SampleDirichlet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SampleDirichletOut is the out-variant of _SampleDirichlet; the C side
// writes into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SampleDirichletOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SampleDirichletOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SaturateWeightToFp16 calls the libtch binding Atg_SaturateWeightToFp16 on
// `weight` and wraps the returned C tensor handle.
func _SaturateWeightToFp16(weight *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SaturateWeightToFp16(ptr, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _ScaledDotProductAttention calls Atg_ScaledDotProductAttention; two result
// handles are written consecutively starting at ctensorPtr0 (file-wide
// multi-return convention). Go bools are converted to the 0/1 int32 the C
// layer expects.
func _ScaledDotProductAttention(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor, dropoutP float64, needAttnWeights bool, isCausal bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cneedAttnWeights := int32(0)
if needAttnWeights { cneedAttnWeights = int32(1) }
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
lib.Atg_ScaledDotProductAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cneedAttnWeights, cisCausal)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _ScaledDotProductAttentionMath calls Atg_ScaledDotProductAttentionMath; two
// result handles are written consecutively starting at ctensorPtr0.
func _ScaledDotProductAttentionMath(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor, dropoutP float64, isCausal bool, dropoutMask *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
lib.Atg_ScaledDotProductAttentionMath(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cisCausal, dropoutMask.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _ScaledDotProductEfficientAttention calls
// Atg_ScaledDotProductEfficientAttention; two result handles are written
// consecutively starting at ctensorPtr0.
func _ScaledDotProductEfficientAttention(query *Tensor, key *Tensor, value *Tensor, computeLogSumexp bool, isCausal bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ccomputeLogSumexp := int32(0)
if computeLogSumexp { ccomputeLogSumexp = int32(1) }
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
lib.Atg_ScaledDotProductEfficientAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, ccomputeLogSumexp, cisCausal)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _ScaledDotProductEfficientAttentionBackward calls
// Atg_ScaledDotProductEfficientAttentionBackward; three result handles are
// written consecutively starting at ctensorPtr0 (pointer-stride layout).
func _ScaledDotProductEfficientAttentionBackward(gradOut_ *Tensor, query *Tensor, key *Tensor, value *Tensor, out *Tensor, logsumexp *Tensor, isCausal bool, chunkGradOutputs bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
cchunkGradOutputs := int32(0)
if chunkGradOutputs { cchunkGradOutputs = int32(1) }
lib.Atg_ScaledDotProductEfficientAttentionBackward(ctensorPtr0, gradOut_.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cisCausal, cchunkGradOutputs)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _ScaledDotProductFlashAttentionBackward calls
// Atg_ScaledDotProductFlashAttentionBackward; three result handles are
// written consecutively starting at ctensorPtr0.
func _ScaledDotProductFlashAttentionBackward(gradOut *Tensor, query *Tensor, key *Tensor, value *Tensor, out *Tensor, logsumexp *Tensor, cumSeqQ *Tensor, cumSeqK *Tensor, maxQ int64, maxK int64, dropoutP float64, isCausal bool, philoxSeed int64, philoxOffset int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cisCausal := int32(0)
if isCausal { cisCausal = int32(1) }
lib.Atg_ScaledDotProductFlashAttentionBackward(ctensorPtr0, gradOut.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cumSeqQ.ctensor, cumSeqK.ctensor, maxQ, maxK, dropoutP, cisCausal, philoxSeed, philoxOffset)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SegmentReduceBackward calls the libtch binding Atg_SegmentReduceBackward
// and wraps the returned C tensor handle.
func _SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, offsets *Tensor, axis int64, initial *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SegmentReduceBackward(ptr, grad.ctensor, output.ctensor, data.ctensor, reduce, lengths.ctensor, offsets.ctensor, axis, initial.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SegmentReduceBackwardOut is the out-variant of _SegmentReduceBackward; the
// C side writes into `out`.
func _SegmentReduceBackwardOut(out *Tensor, grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, offsets *Tensor, axis int64, initial *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SegmentReduceBackwardOut(ptr, out.ctensor, grad.ctensor, output.ctensor, data.ctensor, reduce, lengths.ctensor, offsets.ctensor, axis, initial.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ShapeAsTensor calls the libtch binding Atg_ShapeAsTensor on the receiver
// and wraps the returned handle. del=true drops the receiver on return.
func(ts *Tensor) _ShapeAsTensor(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ShapeAsTensor(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _SlowConv2dBackward calls Atg_SlowConv2dBackward; three result handles are
// written consecutively starting at ctensorPtr0. Slice lengths are passed
// alongside each int64 slice as the C ABI requires. del=true drops the
// receiver on return.
func(ts *Tensor) _SlowConv2dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
lib.Atg_SlowConv2dBackward(ctensorPtr0, gradInput.ctensor, gradWeight.ctensor, gradBias.ctensor, gradOutput.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _SobolEngineDraw calls Atg_SobolEngineDraw; two result handles are written
// consecutively starting at ctensorPtr0.
func _SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_SobolEngineDraw(ctensorPtr0, quasi.ctensor, n, sobolstate.ctensor, dimension, numGenerated, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SobolEngineFf_ applies Atg_SobolEngineFf_ in place, replacing the
// receiver's underlying C tensor handle with the one returned.
func(ts *Tensor) _SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _SobolEngineInitializeState_ applies Atg_SobolEngineInitializeState_ in
// place, replacing the receiver's underlying C tensor handle.
func(ts *Tensor) _SobolEngineInitializeState_(dimension int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _SobolEngineScramble_ applies Atg_SobolEngineScramble_ in place, replacing
// the receiver's underlying C tensor handle.
func(ts *Tensor) _SobolEngineScramble_(ltm *Tensor, dimension int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// _Softmax calls the libtch binding Atg_Softmax along `dim`; halfToFloat is
// passed to C as 0/1. del=true drops the receiver on return.
func(ts *Tensor) _Softmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SoftmaxBackwardData calls the libtch binding Atg_SoftmaxBackwardData and
// wraps the returned C tensor handle.
func _SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
}
// func.returns = `fixed 1`:
// --------------------------
// _SoftmaxBackwardDataOut is the out-variant of _SoftmaxBackwardData; the C
// side writes into `gradInput`.
func _SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SoftmaxBackwardDataOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SoftmaxOut is the out-variant of _Softmax; the C side writes into `out`.
// del=true drops the receiver on return.
func(ts *Tensor) _SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_SoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseAddmm calls the libtch binding Atg_SparseAddmm with the receiver and
// the two matrix arguments. del=true drops the receiver on return.
func(ts *Tensor) _SparseAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseAddmmOut is the out-variant of _SparseAddmm; the C side writes into
// `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseBroadcastTo calls the libtch binding Atg_SparseBroadcastTo with the
// target size. del=true drops the receiver on return.
func(ts *Tensor) _SparseBroadcastTo(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseBroadcastTo(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseBroadcastToCopy calls the libtch binding Atg_SparseBroadcastToCopy
// with the target size. del=true drops the receiver on return.
func(ts *Tensor) _SparseBroadcastToCopy(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseBroadcastToCopy(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseBroadcastToCopyOut is the out-variant of _SparseBroadcastToCopy; the
// C side writes into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseBroadcastToCopyOut(out *Tensor, size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseBroadcastToCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseBscTensorUnsafe calls the libtch binding Atg_SparseBscTensorUnsafe,
// constructing a tensor from the given index/value tensors, size, dtype and
// device ("unsafe" per the underlying C API name — presumably skips
// validation; confirm against libtorch docs).
func _SparseBscTensorUnsafe(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseBscTensorUnsafe(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseBsrTensorUnsafe calls the libtch binding Atg_SparseBsrTensorUnsafe,
// constructing a tensor from the given index/value tensors, size, dtype and
// device.
func _SparseBsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseBsrTensorUnsafe(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCompressedTensorUnsafe calls the libtch binding
// Atg_SparseCompressedTensorUnsafe, constructing a tensor from the given
// index/value tensors, size, dtype and device.
func _SparseCompressedTensorUnsafe(compressedIndices *Tensor, plainIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCompressedTensorUnsafe(ptr, compressedIndices.ctensor, plainIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCooTensorUnsafe calls the libtch binding Atg_SparseCooTensorUnsafe,
// constructing a tensor from indices, values, size, dtype and device.
func _SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCooTensorWithDims calls the libtch binding
// Atg_SparseCooTensorWithDims with the sparse/dense dimension counts, size,
// dtype and device.
func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCooTensorWithDimsAndTensors calls the libtch binding
// Atg_SparseCooTensorWithDimsAndTensors with dimension counts, size, the
// indices/values tensors, dtype and device.
func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, sizeLen, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCooTensorWithDimsAndTensorsOut is the out-variant of
// _SparseCooTensorWithDimsAndTensors; the C side writes into `out` (no
// dtype/device options in this overload).
func _SparseCooTensorWithDimsAndTensorsOut(out *Tensor, sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCooTensorWithDimsAndTensorsOut(ptr, out.ctensor, sparseDim, denseDim, size, sizeLen, indices.ctensor, values.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCooTensorWithDimsOut is the out-variant of _SparseCooTensorWithDims;
// the C side writes into `out`.
func _SparseCooTensorWithDimsOut(out *Tensor, sparseDim int64, denseDim int64, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCooTensorWithDimsOut(ptr, out.ctensor, sparseDim, denseDim, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCscTensorUnsafe calls the libtch binding Atg_SparseCscTensorUnsafe,
// constructing a tensor from the given index/value tensors, size, dtype and
// device.
func _SparseCscTensorUnsafe(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCscTensorUnsafe(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCsrProd calls the libtch binding Atg_SparseCsrProd over `dim`;
// keepdim is passed to C as 0/1. del=true drops the receiver on return.
func(ts *Tensor) _SparseCsrProd(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.Atg_SparseCsrProd(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCsrProdDimDtypeOut is the out-variant of _SparseCsrProd; the C side
// writes into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseCsrProdDimDtypeOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.Atg_SparseCsrProdDimDtypeOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCsrSum calls the libtch binding Atg_SparseCsrSum over `dim`;
// keepdim is passed to C as 0/1. del=true drops the receiver on return.
func(ts *Tensor) _SparseCsrSum(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.Atg_SparseCsrSum(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCsrSumDimDtypeOut is the out-variant of _SparseCsrSum; the C side
// writes into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseCsrSumDimDtypeOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.Atg_SparseCsrSumDimDtypeOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseCsrTensorUnsafe calls the libtch binding Atg_SparseCsrTensorUnsafe,
// constructing a tensor from the given index/value tensors, size, dtype and
// device.
func _SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.Atg_SparseCsrTensorUnsafe(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseLogSoftmax calls the libtch binding Atg_SparseLogSoftmax along
// `dim`; halfToFloat is passed to C as 0/1. del=true drops the receiver on
// return.
func(ts *Tensor) _SparseLogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_SparseLogSoftmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseLogSoftmaxBackwardData calls the libtch binding
// Atg_SparseLogSoftmaxBackwardData; note the receiver is passed as the last C
// argument (self). del=true drops the receiver on return.
func(ts *Tensor) _SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseLogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseLogSoftmaxBackwardDataOut is the out-variant of
// _SparseLogSoftmaxBackwardData; the C side writes into `out`. del=true drops
// the receiver on return.
func(ts *Tensor) _SparseLogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseLogSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseLogSoftmaxInt calls the libtch binding Atg_SparseLogSoftmaxInt along
// `dim` with an explicit output dtype. del=true drops the receiver on return.
func(ts *Tensor) _SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseLogSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseLogSoftmaxOut is the out-variant of _SparseLogSoftmax; the C side
// writes into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseLogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_SparseLogSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseMm calls the libtch binding Atg_SparseMm with the sparse and dense
// operands and wraps the returned C tensor handle.
func _SparseMm(sparse *Tensor, dense *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseMmReduce calls the libtch binding Atg_SparseMmReduce with the
// sparse/dense operands and the reduction name string.
func _SparseMmReduce(sparse *Tensor, dense *Tensor, reduce string)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseMmReduce(ptr, sparse.ctensor, dense.ctensor, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _SparseMmReduceImpl calls Atg_SparseMmReduceImpl; two result handles are
// written consecutively starting at ctensorPtr0 (file-wide multi-return
// convention). del=true drops the receiver on return.
func(ts *Tensor) _SparseMmReduceImpl(other *Tensor, reduce string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_SparseMmReduceImpl(ctensorPtr0, ts.ctensor, other.ctensor, reduce)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSoftmax calls the libtch binding Atg_SparseSoftmax along `dim`;
// halfToFloat is passed to C as 0/1. del=true drops the receiver on return.
func(ts *Tensor) _SparseSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_SparseSoftmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSoftmaxBackwardData calls the libtch binding
// Atg_SparseSoftmaxBackwardData; note the receiver is passed as the last C
// argument (self). del=true drops the receiver on return.
func(ts *Tensor) _SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSoftmaxBackwardDataOut is the out-variant of
// _SparseSoftmaxBackwardData; the C side writes into `out`. del=true drops
// the receiver on return.
func(ts *Tensor) _SparseSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSoftmaxInt calls the libtch binding Atg_SparseSoftmaxInt along `dim`
// with an explicit output dtype. del=true drops the receiver on return.
func(ts *Tensor) _SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSoftmaxOut is the out-variant of _SparseSoftmax; the C side writes
// into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat { chalfToFloat = int32(1) }
lib.Atg_SparseSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSparseMatmul calls the libtch binding Atg_SparseSparseMatmul with
// the receiver and `other`. del=true drops the receiver on return.
func(ts *Tensor) _SparseSparseMatmul(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSparseMatmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSparseMatmulOut is the out-variant of _SparseSparseMatmul; the C
// side writes into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseSparseMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSparseMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSum calls the libtch binding Atg_SparseSum on the receiver and wraps
// the returned handle. del=true drops the receiver on return.
func(ts *Tensor) _SparseSum(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSum(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSumBackward calls the libtch binding Atg_SparseSumBackward with
// `grad`, the receiver, and `dim`. del=true drops the receiver on return.
func(ts *Tensor) _SparseSumBackward(grad *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.Atg_SparseSumBackward(ptr, grad.ctensor, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSumBackwardOut is the out-variant of _SparseSumBackward; the C side
// writes into `out`. del=true drops the receiver on return.
func(ts *Tensor) _SparseSumBackwardOut(out *Tensor, grad *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.Atg_SparseSumBackwardOut(ptr, out.ctensor, grad.ctensor, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSumDim calls the libtch binding Atg_SparseSumDim over `dim`.
// del=true drops the receiver on return.
func(ts *Tensor) _SparseSumDim(dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.Atg_SparseSumDim(ptr, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSumDimDtype calls the libtch binding Atg_SparseSumDimDtype over
// `dim` with an explicit output dtype. del=true drops the receiver on return.
func(ts *Tensor) _SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.Atg_SparseSumDimDtype(ptr, ts.ctensor, dim, dimLen, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSumDimOut wraps lib.Atg_SparseSumDimOut (out-variant). If del is
// true the receiver is dropped after the call returns.
func(ts *Tensor) _SparseSumDimOut(out *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
lib.Atg_SparseSumDimOut(ptr, out.ctensor, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SparseSumDtype wraps lib.Atg_SparseSumDtype. If del is true the receiver
// is dropped after the call returns.
func(ts *Tensor) _SparseSumDtype(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_SparseSumDtype(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Spdiags wraps lib.Atg_Spdiags.
func _Spdiags(diagonals *Tensor, offsets *Tensor, shape []int64, layout Layout)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
shapeLen := len(shape)
lib.Atg_Spdiags(ptr, diagonals.ctensor, offsets.ctensor, shape, shapeLen, int8(layout))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _SpdiagsOut wraps lib.Atg_SpdiagsOut (out-variant).
func _SpdiagsOut(out *Tensor, diagonals *Tensor, offsets *Tensor, shape []int64, layout Layout)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
shapeLen := len(shape)
lib.Atg_SpdiagsOut(ptr, out.ctensor, diagonals.ctensor, offsets.ctensor, shape, shapeLen, int8(layout))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Stack wraps lib.Atg_Stack over a slice of input tensors.
func _Stack(tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.Atg_Stack(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _StackOut wraps lib.Atg_StackOut (out-variant) over a slice of input tensors.
func _StackOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.Atg_StackOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _StandardGamma wraps lib.Atg_StandardGamma. If del is true the receiver is
// dropped after the call returns.
func(ts *Tensor) _StandardGamma(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_StandardGamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _StandardGammaGrad wraps lib.Atg_StandardGammaGrad. If del is true the
// receiver is dropped after the call returns.
func(ts *Tensor) _StandardGammaGrad(output *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _StandardGammaGradOut wraps lib.Atg_StandardGammaGradOut (out-variant).
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _StandardGammaGradOut(out *Tensor, output *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_StandardGammaGradOut(ptr, out.ctensor, ts.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _StandardGammaOut wraps lib.Atg_StandardGammaOut (out-variant). If del is
// true the receiver is dropped after the call returns.
func(ts *Tensor) _StandardGammaOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_StandardGammaOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAmbiguousDefaults wraps lib.Atg_TestAmbiguousDefaults.
func _TestAmbiguousDefaults(dummy *Tensor, a int64, b int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestAmbiguousDefaults(ptr, dummy.ctensor, a, b)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAmbiguousDefaultsB wraps lib.Atg_TestAmbiguousDefaultsB.
func _TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestAmbiguousDefaultsB(ptr, dummy.ctensor, a, b)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAutogradMultipleDispatch wraps lib.Atg_TestAutogradMultipleDispatch.
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _TestAutogradMultipleDispatch(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestAutogradMultipleDispatch(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAutogradMultipleDispatchFullcoverageOut wraps the out-variant binding.
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _TestAutogradMultipleDispatchFullcoverageOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestAutogradMultipleDispatchFullcoverageOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAutogradMultipleDispatchNtonly wraps lib.Atg_TestAutogradMultipleDispatchNtonly.
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _TestAutogradMultipleDispatchNtonly(b bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
// Go bool -> C int flag.
cb := int32(0)
if b { cb = int32(1) }
lib.Atg_TestAutogradMultipleDispatchNtonly(ptr, ts.ctensor, cb)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAutogradMultipleDispatchView wraps lib.Atg_TestAutogradMultipleDispatchView.
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _TestAutogradMultipleDispatchView(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestAutogradMultipleDispatchView(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAutogradMultipleDispatchViewCopy wraps lib.Atg_TestAutogradMultipleDispatchViewCopy.
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _TestAutogradMultipleDispatchViewCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestAutogradMultipleDispatchViewCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestAutogradMultipleDispatchViewCopyOut wraps the out-variant binding.
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _TestAutogradMultipleDispatchViewCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestAutogradMultipleDispatchViewCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestCheckTensor wraps lib.Atg_TestCheckTensor. If del is true the receiver
// is dropped after the call returns.
func(ts *Tensor) _TestCheckTensor(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestCheckTensor(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestOptionalFilledIntlist wraps lib.Atg_TestOptionalFilledIntlist.
func _TestOptionalFilledIntlist(values *Tensor, addends []int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
addendsLen := len(addends)
lib.Atg_TestOptionalFilledIntlist(ptr, values.ctensor, addends, addendsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestOptionalFilledIntlistOut wraps lib.Atg_TestOptionalFilledIntlistOut (out-variant).
func _TestOptionalFilledIntlistOut(out *Tensor, values *Tensor, addends []int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
addendsLen := len(addends)
lib.Atg_TestOptionalFilledIntlistOut(ptr, out.ctensor, values.ctensor, addends, addendsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestOptionalFloatlist wraps lib.Atg_TestOptionalFloatlist.
func _TestOptionalFloatlist(values *Tensor, addends []float64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
addendsLen := len(addends)
lib.Atg_TestOptionalFloatlist(ptr, values.ctensor, addends, addendsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestOptionalFloatlistOut wraps lib.Atg_TestOptionalFloatlistOut (out-variant).
func _TestOptionalFloatlistOut(out *Tensor, values *Tensor, addends []float64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
addendsLen := len(addends)
lib.Atg_TestOptionalFloatlistOut(ptr, out.ctensor, values.ctensor, addends, addendsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestOptionalIntlist wraps lib.Atg_TestOptionalIntlist.
func _TestOptionalIntlist(values *Tensor, addends []int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
addendsLen := len(addends)
lib.Atg_TestOptionalIntlist(ptr, values.ctensor, addends, addendsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestOptionalIntlistOut wraps lib.Atg_TestOptionalIntlistOut (out-variant).
func _TestOptionalIntlistOut(out *Tensor, values *Tensor, addends []int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
addendsLen := len(addends)
lib.Atg_TestOptionalIntlistOut(ptr, out.ctensor, values.ctensor, addends, addendsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestSerializationSubcmul wraps lib.Atg_TestSerializationSubcmul. If del is
// true the receiver is dropped after the call returns.
func(ts *Tensor) _TestSerializationSubcmul(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestSerializationSubcmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestStringDefault wraps lib.Atg_TestStringDefault.
func _TestStringDefault(dummy *Tensor, a string, b string)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestStringDefault(ptr, dummy.ctensor, a, b)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestWarnInAutograd wraps lib.Atg_TestWarnInAutograd. If del is true the
// receiver is dropped after the call returns.
func(ts *Tensor) _TestWarnInAutograd(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestWarnInAutograd(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TestWarnInAutogradOut wraps lib.Atg_TestWarnInAutogradOut (out-variant).
// If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _TestWarnInAutogradOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TestWarnInAutogradOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ToCopy wraps lib.Atg_ToCopy with the given target dtype/device. If del is
// true the receiver is dropped after the call returns.
func(ts *Tensor) _ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
// Go bool -> C int flag.
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_ToCopy(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ToCopyOut wraps lib.Atg_ToCopyOut (out-variant). If del is true the
// receiver is dropped after the call returns.
func(ts *Tensor) _ToCopyOut(out *Tensor, nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
// Go bool -> C int flag.
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.Atg_ToCopyOut(ptr, out.ctensor, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ToDense wraps lib.Atg_ToDense. If del is true the receiver is dropped
// after the call returns.
func(ts *Tensor) _ToDense(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_ToDense(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ToDenseOut wraps lib.Atg_ToDenseOut (out-variant). If del is true the
// receiver is dropped after the call returns.
func(ts *Tensor) _ToDenseOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_ToDenseOut(ptr, out.ctensor, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _TransformBiasRescaleQkv wraps lib.Atg_TransformBiasRescaleQkv, which
// writes three tensor handles into the out array.
func _TransformBiasRescaleQkv(qkv *Tensor, qkvBias *Tensor, numHeads int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// Three contiguous pointer-sized out slots; C.malloc(0) was undersized for
// the handles written by the C call (slots 1 and 2 pointed past the
// allocation) and the holder was never freed.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_TransformBiasRescaleQkv(ctensorPtr0, qkv.ctensor, qkvBias.ctensor, numHeads)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _TransformBiasRescaleQkvOut wraps lib.Atg_TransformBiasRescaleQkvOut
// (out-variant), which writes three tensor handles into the out array.
func _TransformBiasRescaleQkvOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, qkv *Tensor, qkvBias *Tensor, numHeads int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// Three contiguous pointer-sized out slots (was C.malloc(0): undersized
// for the three handles and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_TransformBiasRescaleQkvOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, qkv.ctensor, qkvBias.ctensor, numHeads)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _TransformerDecoderOnlyLayerFwd wraps lib.Atg_TransformerDecoderOnlyLayerFwd,
// which writes three tensor handles into the out array.
func _TransformerDecoderOnlyLayerFwd(src *Tensor, embedDim int64, numHeads int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, useGelu bool, normFirst bool, eps float64, normWeight1 *Tensor, normBias1 *Tensor, normWeight2 *Tensor, normBias2 *Tensor, ffnWeight1 *Tensor, ffnBias1 *Tensor, ffnWeight2 *Tensor, ffnBias2 *Tensor, mask *Tensor, incrKey *Tensor, incrValue *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// Three contiguous pointer-sized out slots (was C.malloc(0): undersized
// for the three handles and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools -> C int flags.
cuseGelu := int32(0)
if useGelu { cuseGelu = int32(1) }
cnormFirst := int32(0)
if normFirst { cnormFirst = int32(1) }
lib.Atg_TransformerDecoderOnlyLayerFwd(ctensorPtr0, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _TransformerDecoderOnlyLayerFwdOut wraps lib.Atg_TransformerDecoderOnlyLayerFwdOut
// (out-variant), which writes three tensor handles into the out array.
func _TransformerDecoderOnlyLayerFwdOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, src *Tensor, embedDim int64, numHeads int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, useGelu bool, normFirst bool, eps float64, normWeight1 *Tensor, normBias1 *Tensor, normWeight2 *Tensor, normBias2 *Tensor, ffnWeight1 *Tensor, ffnBias1 *Tensor, ffnWeight2 *Tensor, ffnBias2 *Tensor, mask *Tensor, incrKey *Tensor, incrValue *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// Three contiguous pointer-sized out slots (was C.malloc(0): undersized
// for the three handles and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools -> C int flags.
cuseGelu := int32(0)
if useGelu { cuseGelu = int32(1) }
cnormFirst := int32(0)
if normFirst { cnormFirst = int32(1) }
lib.Atg_TransformerDecoderOnlyLayerFwdOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TransformerEncoderLayerFwd wraps lib.Atg_TransformerEncoderLayerFwd.
// maskType is an optional int64 encoded as a 0- or 1-element slice.
func _TransformerEncoderLayerFwd(src *Tensor, embedDim int64, numHeads int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, useGelu bool, normFirst bool, eps float64, normWeight1 *Tensor, normBias1 *Tensor, normWeight2 *Tensor, normBias2 *Tensor, ffnWeight1 *Tensor, ffnBias1 *Tensor, ffnWeight2 *Tensor, ffnBias2 *Tensor, mask *Tensor, maskType []int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
// Go bools -> C int flags.
cuseGelu := int32(0)
if useGelu { cuseGelu = int32(1) }
cnormFirst := int32(0)
if normFirst { cnormFirst = int32(1) }
// Optional maskType: null flag set when the slice is empty.
var cmaskTypeVal int64 = 0
var cmaskTypeNull int = 1
if len(maskType) > 0 {
cmaskTypeVal = maskType[0]
cmaskTypeNull = 0
}
lib.Atg_TransformerEncoderLayerFwd(ptr, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, cmaskTypeVal, cmaskTypeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TransformerEncoderLayerFwdOut wraps lib.Atg_TransformerEncoderLayerFwdOut
// (out-variant). maskType is an optional int64 encoded as a 0- or 1-element slice.
func _TransformerEncoderLayerFwdOut(out *Tensor, src *Tensor, embedDim int64, numHeads int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, useGelu bool, normFirst bool, eps float64, normWeight1 *Tensor, normBias1 *Tensor, normWeight2 *Tensor, normBias2 *Tensor, ffnWeight1 *Tensor, ffnBias1 *Tensor, ffnWeight2 *Tensor, ffnBias2 *Tensor, mask *Tensor, maskType []int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
// Go bools -> C int flags.
cuseGelu := int32(0)
if useGelu { cuseGelu = int32(1) }
cnormFirst := int32(0)
if normFirst { cnormFirst = int32(1) }
// Optional maskType: null flag set when the slice is empty.
var cmaskTypeVal int64 = 0
var cmaskTypeNull int = 1
if len(maskType) > 0 {
cmaskTypeVal = maskType[0]
cmaskTypeNull = 0
}
lib.Atg_TransformerEncoderLayerFwdOut(ptr, out.ctensor, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, cmaskTypeVal, cmaskTypeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Trilinear wraps lib.Atg_Trilinear.
func _Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
expand1Len := len(expand1)
expand2Len := len(expand2)
expand3Len := len(expand3)
sumdimLen := len(sumdim)
lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, expand1Len, expand2, expand2Len, expand3, expand3Len, sumdim, sumdimLen, unrollDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TrilinearOut wraps lib.Atg_TrilinearOut (out-variant).
func _TrilinearOut(out *Tensor, i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
expand1Len := len(expand1)
expand2Len := len(expand2)
expand3Len := len(expand3)
sumdimLen := len(sumdim)
lib.Atg_TrilinearOut(ptr, out.ctensor, i1.ctensor, i2.ctensor, i3.ctensor, expand1, expand1Len, expand2, expand2Len, expand3, expand3Len, sumdim, sumdimLen, unrollDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TritonMultiHeadAttention wraps lib.Atg_TritonMultiHeadAttention.
func _TritonMultiHeadAttention(query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TritonMultiHeadAttention(ptr, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TritonMultiHeadAttentionOut wraps lib.Atg_TritonMultiHeadAttentionOut (out-variant).
func _TritonMultiHeadAttentionOut(out *Tensor, query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TritonMultiHeadAttentionOut(ptr, out.ctensor, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TritonScaledDotAttention wraps lib.Atg_TritonScaledDotAttention.
func _TritonScaledDotAttention(q *Tensor, k *Tensor, v *Tensor, dropoutP float64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TritonScaledDotAttention(ptr, q.ctensor, k.ctensor, v.ctensor, dropoutP)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _TritonScaledDotAttentionOut wraps lib.Atg_TritonScaledDotAttentionOut (out-variant).
func _TritonScaledDotAttentionOut(out *Tensor, q *Tensor, k *Tensor, v *Tensor, dropoutP float64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.Atg_TritonScaledDotAttentionOut(ptr, out.ctensor, q.ctensor, k.ctensor, v.ctensor, dropoutP)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _Unique wraps lib.Atg_Unique, which writes two tensor handles into the out
// array. If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _Unique(sorted bool, returnInverse bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Two contiguous pointer-sized out slots (was C.malloc(0): undersized for
// the two handles written by the C call, and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools -> C int flags.
csorted := int32(0)
if sorted { csorted = int32(1) }
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
lib.Atg_Unique(ctensorPtr0, ts.ctensor, csorted, creturnInverse)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _Unique2 wraps lib.Atg_Unique2, which writes three tensor handles into the
// out array. If del is true the receiver is dropped after the call returns.
func(ts *Tensor) _Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three contiguous pointer-sized out slots (was C.malloc(0): undersized
// for the three handles and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools -> C int flags.
csorted := int32(0)
if sorted { csorted = int32(1) }
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
lib.Atg_Unique2(ctensorPtr0, ts.ctensor, csorted, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _Unique2Out wraps lib.Atg_Unique2Out (out-variant), which writes three
// tensor handles into the out array. If del is true the receiver is dropped
// after the call returns.
func(ts *Tensor) _Unique2Out(out0 *Tensor, out1 *Tensor, out2 *Tensor, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three contiguous pointer-sized out slots (was C.malloc(0): undersized
// for the three handles and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools -> C int flags.
csorted := int32(0)
if sorted { csorted = int32(1) }
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
lib.Atg_Unique2Out(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, csorted, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _UniqueOut wraps lib.Atg_UniqueOut (out-variant), which writes two tensor
// handles into the out array. If del is true the receiver is dropped after
// the call returns.
func(ts *Tensor) _UniqueOut(out0 *Tensor, out1 *Tensor, sorted bool, returnInverse bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Two contiguous pointer-sized out slots (was C.malloc(0): undersized for
// the two handles and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools -> C int flags.
csorted := int32(0)
if sorted { csorted = int32(1) }
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
lib.Atg_UniqueOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, csorted, creturnInverse)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _UnpackDual wraps lib.Atg_UnpackDual, which writes two tensor handles into
// the out array.
func _UnpackDual(dual *Tensor, level int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// Two contiguous pointer-sized out slots (was C.malloc(0): undersized for
// the two handles and never freed).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_UnpackDual(ctensorPtr0, dual.ctensor, level)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UnsafeView wraps lib.Atg_UnsafeView. If del is true the receiver is
// dropped after the call returns.
func(ts *Tensor) _UnsafeView(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
sizeLen := len(size)
lib.Atg_UnsafeView(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UnsafeViewOut wraps lib.Atg_UnsafeViewOut (out-variant). If del is true
// the receiver is dropped after the call returns.
func(ts *Tensor) _UnsafeViewOut(out *Tensor, size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
sizeLen := len(size)
lib.Atg_UnsafeViewOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBicubic2dAa wraps lib.Atg_UpsampleBicubic2dAa. scalesH/scalesW are
// optional float64 values encoded as 0- or 1-element slices. If del is true
// the receiver is dropped after the call returns.
func(ts *Tensor) _UpsampleBicubic2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
// Optional scales: null flags set when the slices are empty.
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAa(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBicubic2dAaBackward wraps lib.Atg_UpsampleBicubic2dAaBackward.
// scalesH/scalesW are optional float64 values encoded as 0- or 1-element slices.
func _UpsampleBicubic2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// One pointer-sized out slot (was C.malloc(0): undersized and leaked).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
// Optional scales: null flags set when the slices are empty.
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAaBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBicubic2dAaBackwardGradInput wraps the grad_input (out-variant) form:
// gradInput is passed to C as the destination tensor. Optional scalesH/scalesW
// use the empty-slice = null convention.
func _UpsampleBicubic2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBicubic2dAaOut is the out-variant: `out` is forwarded to C as the
// destination tensor. Optional scalesH/scalesW use the empty-slice = null
// convention; del drops the receiver after the call.
func(ts *Tensor) _UpsampleBicubic2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBicubic2dAaVec is the vec-variant: outputSize and scaleFactors are
// passed to C as full slices with explicit lengths (no single-value/null encoding).
func _UpsampleBicubic2dAaVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
scaleFactorsLen := len(scaleFactors)
lib.Atg_UpsampleBicubic2dAaVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBilinear2dAa wraps lib.Atg_UpsampleBilinear2dAa on the receiver.
// scalesH/scalesW are optional: empty slice means null (null flag = 1),
// otherwise element 0 is the value. del drops the receiver after the call.
func(ts *Tensor) _UpsampleBilinear2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAa(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBilinear2dAaBackward wraps lib.Atg_UpsampleBilinear2dAaBackward
// (free function taking gradOutput). Optional scalesH/scalesW use the
// empty-slice = null convention.
func _UpsampleBilinear2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAaBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBilinear2dAaBackwardGradInput wraps the grad_input (out-variant)
// form: gradInput is passed to C as the destination tensor. Optional
// scalesH/scalesW use the empty-slice = null convention.
func _UpsampleBilinear2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBilinear2dAaOut is the out-variant: `out` is forwarded to C as the
// destination tensor. Optional scalesH/scalesW use the empty-slice = null
// convention; del drops the receiver after the call.
func(ts *Tensor) _UpsampleBilinear2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleBilinear2dAaVec is the vec-variant: outputSize and scaleFactors are
// passed to C as full slices with explicit lengths (no single-value/null encoding).
func _UpsampleBilinear2dAaVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
scaleFactorsLen := len(scaleFactors)
lib.Atg_UpsampleBilinear2dAaVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact1d wraps lib.Atg_UpsampleNearestExact1d on the receiver.
// scales is optional: empty slice means null (null flag = 1), otherwise
// element 0 is the value. del drops the receiver after the call.
func(ts *Tensor) _UpsampleNearestExact1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
 cscalesVal = scales[0]
 cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact1dBackward wraps lib.Atg_UpsampleNearestExact1dBackward
// (free function taking gradOutput). Optional scales uses the
// empty-slice = null convention.
func _UpsampleNearestExact1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
 cscalesVal = scales[0]
 cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact1dBackwardGradInput wraps the grad_input (out-variant)
// form: gradInput is passed to C as the destination tensor. Optional scales
// uses the empty-slice = null convention.
func _UpsampleNearestExact1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
 cscalesVal = scales[0]
 cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact1dOut is the out-variant: `out` is forwarded to C as the
// destination tensor. Optional scales uses the empty-slice = null convention;
// del drops the receiver after the call.
func(ts *Tensor) _UpsampleNearestExact1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
 cscalesVal = scales[0]
 cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact1dVec is the vec-variant: outputSize and scaleFactors
// are passed to C as full slices with explicit lengths.
func _UpsampleNearestExact1dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
scaleFactorsLen := len(scaleFactors)
lib.Atg_UpsampleNearestExact1dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact2d wraps lib.Atg_UpsampleNearestExact2d on the receiver.
// scalesH/scalesW are optional: empty slice means null (null flag = 1),
// otherwise element 0 is the value. del drops the receiver after the call.
func(ts *Tensor) _UpsampleNearestExact2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact2dBackward wraps lib.Atg_UpsampleNearestExact2dBackward
// (free function taking gradOutput). Optional scalesH/scalesW use the
// empty-slice = null convention.
func _UpsampleNearestExact2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact2dBackwardGradInput wraps the grad_input (out-variant)
// form: gradInput is passed to C as the destination tensor. Optional
// scalesH/scalesW use the empty-slice = null convention.
func _UpsampleNearestExact2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact2dOut is the out-variant: `out` is forwarded to C as the
// destination tensor. Optional scalesH/scalesW use the empty-slice = null
// convention; del drops the receiver after the call.
func(ts *Tensor) _UpsampleNearestExact2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact2dVec is the vec-variant: outputSize and scaleFactors
// are passed to C as full slices with explicit lengths.
func _UpsampleNearestExact2dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
scaleFactorsLen := len(scaleFactors)
lib.Atg_UpsampleNearestExact2dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact3d wraps lib.Atg_UpsampleNearestExact3d on the receiver.
// scalesD/scalesH/scalesW are optional: empty slice means null (null flag = 1),
// otherwise element 0 is the value. del drops the receiver after the call.
func(ts *Tensor) _UpsampleNearestExact3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
 cscalesDVal = scalesD[0]
 cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact3dBackward wraps lib.Atg_UpsampleNearestExact3dBackward
// (free function taking gradOutput). Optional scalesD/scalesH/scalesW use the
// empty-slice = null convention.
func _UpsampleNearestExact3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
 cscalesDVal = scalesD[0]
 cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact3dBackwardGradInput wraps the grad_input (out-variant)
// form: gradInput is passed to C as the destination tensor. Optional
// scalesD/scalesH/scalesW use the empty-slice = null convention.
func _UpsampleNearestExact3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
 cscalesDVal = scalesD[0]
 cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact3dOut is the out-variant: `out` is forwarded to C as the
// destination tensor. Optional scalesD/scalesH/scalesW use the empty-slice =
// null convention; del drops the receiver after the call.
func(ts *Tensor) _UpsampleNearestExact3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
 cscalesDVal = scalesD[0]
 cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
 cscalesHVal = scalesH[0]
 cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
 cscalesWVal = scalesW[0]
 cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _UpsampleNearestExact3dVec is the vec-variant: outputSize and scaleFactors
// are passed to C as full slices with explicit lengths.
func _UpsampleNearestExact3dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
scaleFactorsLen := len(scaleFactors)
lib.Atg_UpsampleNearestExact3dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// _UseCudnnCtcLoss returns the boolean reported by lib.Atg_UseCudnnCtcLoss
// (presumably whether the cuDNN CTC-loss path applies to these inputs — verify
// against libtorch). Length slices are passed with explicit lengths.
func _UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64)(retVal bool, err error) {
inputLengthsLen := len(inputLengths)
targetLengthsLen := len(targetLengths)
retVal = lib.Atg_UseCudnnCtcLoss(logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// _UseCudnnCtcLossTensor is the tensor-lengths variant of _UseCudnnCtcLoss:
// inputLengths/targetLengths are tensors rather than int64 slices.
func _UseCudnnCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64)(retVal bool, err error) {
retVal = lib.Atg_UseCudnnCtcLossTensor(logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// _UseCudnnRnnFlattenWeight returns the boolean reported by the nullary C
// wrapper lib.Atg_UseCudnnRnnFlattenWeight.
func _UseCudnnRnnFlattenWeight()(retVal bool, err error) {
retVal = lib.Atg_UseCudnnRnnFlattenWeight()
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _Values wraps lib.Atg_Values on the receiver and returns the resulting
// tensor. del drops the receiver after the call.
func(ts *Tensor) _Values(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Values(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ValuesCopy wraps lib.Atg_ValuesCopy on the receiver and returns the
// resulting tensor. del drops the receiver after the call.
func(ts *Tensor) _ValuesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ValuesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _ValuesCopyOut is the out-variant of _ValuesCopy: `out` is forwarded to C as
// the destination tensor. del drops the receiver after the call.
func(ts *Tensor) _ValuesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ValuesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `int64`:
// --------------------------
// _Version returns the int64 reported by lib.Atg_Version for the receiver
// (presumably the tensor's version counter — verify against libtorch).
// del drops the receiver after the call.
func(ts *Tensor) _Version(del bool)(retVal int64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.Atg_Version(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// _WeightNorm wraps lib.Atg_WeightNorm over tensors v and g along dimension
// dim, returning a single result tensor.
func _WeightNorm(v *Tensor, g *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _WeightNormDifferentiableBackward wraps lib.Atg_WeightNormDifferentiableBackward,
// which returns two tensors. ctensorPtr1 is derived one pointer-width past
// ctensorPtr0; the C side is assumed to write both result handles consecutively
// starting at ctensorPtr0 — generator-wide pattern, do not hand-edit.
func _WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormDifferentiableBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _WeightNormInterface wraps lib.Atg_WeightNormInterface, which returns two
// tensors; the C side writes both handles consecutively starting at ctensorPtr0.
func _WeightNormInterface(v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormInterface(ctensorPtr0, v.ctensor, g.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _WeightNormInterfaceBackward wraps lib.Atg_WeightNormInterfaceBackward,
// which returns two tensors written consecutively starting at ctensorPtr0.
func _WeightNormInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormInterfaceBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _WeightNormInterfaceBackwardOut is the out-variant: out0/out1 are forwarded
// to C as destinations; two result handles are written consecutively starting
// at ctensorPtr0.
func _WeightNormInterfaceBackwardOut(out0 *Tensor, out1 *Tensor, gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormInterfaceBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// _WeightNormInterfaceOut is the out-variant of _WeightNormInterface: out0/out1
// are forwarded to C as destinations; two result handles are written
// consecutively starting at ctensorPtr0.
func _WeightNormInterfaceOut(out0 *Tensor, out1 *Tensor, v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormInterfaceOut(ctensorPtr0, out0.ctensor, out1.ctensor, v.ctensor, g.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// Abs wraps lib.AtgAbs on the receiver and returns the result as a new Tensor.
// del drops the receiver after the call.
func(ts *Tensor) Abs(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbs(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Abs_ is the in-place variant of Abs: the handle returned by the C call
// replaces the receiver's ctensor.
func(ts *Tensor) Abs_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbs_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AbsOut is the out-variant of Abs: `out` is forwarded to C as the destination
// tensor. del drops the receiver after the call.
func(ts *Tensor) AbsOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Absolute wraps lib.AtgAbsolute on the receiver and returns the result as a
// new Tensor. del drops the receiver after the call.
func(ts *Tensor) Absolute(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsolute(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Absolute_ is the in-place variant of Absolute: the handle returned by the C
// call replaces the receiver's ctensor.
func(ts *Tensor) Absolute_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsolute_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AbsoluteOut is the out-variant of Absolute: `out` is forwarded to C as the
// destination tensor. del drops the receiver after the call.
func(ts *Tensor) AbsoluteOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsoluteOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Acos wraps lib.AtgAcos on the receiver and returns the result as a new
// Tensor. del drops the receiver after the call.
func(ts *Tensor) Acos(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcos(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Acos_ is the in-place variant of Acos: the handle returned by the C call
// replaces the receiver's ctensor.
func(ts *Tensor) Acos_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcos_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AcosOut is the out-variant of Acos: `out` is forwarded to C as the
// destination tensor. del drops the receiver after the call.
func(ts *Tensor) AcosOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Acosh wraps lib.AtgAcosh on the receiver and returns the result as a new
// Tensor. del drops the receiver after the call.
func(ts *Tensor) Acosh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcosh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Acosh_ is the in-place variant of Acosh: the handle returned by the C call
// replaces the receiver's ctensor.
func(ts *Tensor) Acosh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcosh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AcoshOut is the out-variant of Acosh: `out` is forwarded to C as the
// destination tensor. del drops the receiver after the call.
func(ts *Tensor) AcoshOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcoshOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AdaptiveAvgPool1d wraps lib.AtgAdaptiveAvgPool1d on the receiver; outputSize
// is passed with its explicit length. del drops the receiver after the call.
func(ts *Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AdaptiveAvgPool2d wraps lib.AtgAdaptiveAvgPool2d on the receiver; outputSize
// is passed with its explicit length. del drops the receiver after the call.
func(ts *Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AdaptiveAvgPool2dOut is the out-variant of AdaptiveAvgPool2d: `out` is
// forwarded to C as the destination tensor. del drops the receiver after
// the call.
func(ts *Tensor) AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AdaptiveAvgPool3d wraps lib.AtgAdaptiveAvgPool3d on the receiver; outputSize
// is passed with its explicit length. del drops the receiver after the call.
func(ts *Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AdaptiveAvgPool3dBackward wraps lib.AtgAdaptiveAvgPool3dBackward with
// gradInput as the C-side destination; the receiver is the original input.
// del drops the receiver after the call.
func(ts *Tensor) AdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveAvgPool3dBackward(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AdaptiveAvgPool3dOut is the out-variant of AdaptiveAvgPool3d: `out` is
// forwarded to C as the destination tensor. del drops the receiver after
// the call.
func(ts *Tensor) AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// AdaptiveMaxPool1d wraps lib.AtgAdaptiveMaxPool1d, which returns two tensors;
// the C side writes both handles consecutively starting at ctensorPtr0
// (ctensorPtr1 is one pointer-width past it — generator-wide pattern).
// del drops the receiver after the call.
func(ts *Tensor) AdaptiveMaxPool1d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveMaxPool1d(ctensorPtr0, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) AdaptiveMaxPool2d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveMaxPool2d(ctensorPtr0, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) AdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveMaxPool2dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) AdaptiveMaxPool3d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveMaxPool3d(ctensorPtr0, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) AdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
outputSizeLen := len(outputSize)
lib.AtgAdaptiveMaxPool3dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Add(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdd(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Add_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdd_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addbmm_(batch1 *Tensor, batch2 *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addcdiv_(tensor1 *Tensor, tensor2 *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addcmul(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addcmul_(tensor1 *Tensor, tensor2 *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addmm_(mat1 *Tensor, mat2 *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmm_(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addmv(mat *Tensor, vec *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addmv_(mat *Tensor, vec *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addr(vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Addr_(vec1 *Tensor, vec2 *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Adjoint(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdjoint(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func AffineGridGenerator(theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, sizeLen, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func AffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, sizeLen, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func AffineGridGeneratorOut(out *Tensor, theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgAffineGridGeneratorOut(ptr, out.ctensor, theta.ctensor, size, sizeLen, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Alias(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAlias(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AliasCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAliasCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AliasCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAliasCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AlignAs(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) All(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAll(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AllAllOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAllAllOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AllDim(dim int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAllDim(ptr, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AllOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
func(ts *Tensor) Allclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
cequalNan := int32(0)
if equalNan { cequalNan = int32(1) }
retVal = lib.AtgAllclose(ts.ctensor, other.ctensor, rtol, atol, cequalNan)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func AlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AlphaDropout_(p float64, train bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Amax(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAmax(ptr, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAmaxOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Amin(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAmin(ptr, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAminOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) Aminmax(dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAminmax(ctensorPtr0, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) AminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAminmaxOut(ctensorPtr0, min.ctensor, max.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Angle(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAngle(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AngleOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Any(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAny(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AnyAllOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAnyAllOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AnyDim(dim int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAnyDim(ptr, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) AnyOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func Arange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func ArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArangeStart(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func ArangeStartStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArangeStartStep(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Arccos(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccos(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Arccos_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccos_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ArccosOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccosOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Arccosh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccosh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Arccosh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccosh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ArccoshOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccoshOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Arcsin(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Arcsin_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsin_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ArcsinOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arcsinh calls the C wrapper lib.AtgArcsinh and returns the result as a new
// Tensor. If del is true, the receiver is dropped (its C memory freed) when
// this call returns.
// NOTE(review): the result slot comes from C.malloc(0) and is never freed
// after *ptr is copied out; presumably the generator accepts this tiny
// per-call allocation — confirm against the libtch calling convention.
func(ts *Tensor) Arcsinh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arcsinh_ applies lib.AtgArcsinh_ in place, replacing ts.ctensor with the
// handle written back by the C call.
func(ts *Tensor) Arcsinh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ArcsinhOut calls lib.AtgArcsinhOut with out as the destination tensor and
// wraps the returned handle in a new Tensor.
func(ts *Tensor) ArcsinhOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arctan calls lib.AtgArctan and returns the result as a new Tensor.
func(ts *Tensor) Arctan(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arctan2 calls lib.AtgArctan2 with ts and other and returns a new Tensor.
func(ts *Tensor) Arctan2(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan2(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arctan2_ applies lib.AtgArctan2_ in place, replacing ts.ctensor.
func(ts *Tensor) Arctan2_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan2_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Arctan2Out calls lib.AtgArctan2Out with out as the destination tensor.
func(ts *Tensor) Arctan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arctan_ applies lib.AtgArctan_ in place, replacing ts.ctensor.
func(ts *Tensor) Arctan_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ArctanOut calls lib.AtgArctanOut with out as the destination tensor.
func(ts *Tensor) ArctanOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arctanh calls lib.AtgArctanh and returns the result as a new Tensor.
func(ts *Tensor) Arctanh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Arctanh_ applies lib.AtgArctanh_ in place, replacing ts.ctensor.
func(ts *Tensor) Arctanh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ArctanhOut calls lib.AtgArctanhOut with out as the destination tensor.
func(ts *Tensor) ArctanhOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Argmax calls lib.AtgArgmax. dim is an optional scalar encoded as a slice:
// an empty slice means "no dim" and is signalled to the C side with
// cdimNull=1; only dim[0] is used when present.
func(ts *Tensor) Argmax(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgArgmax(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ArgmaxOut calls lib.AtgArgmaxOut with out as the destination tensor; dim is
// an optional scalar encoded as a slice (empty = null).
func(ts *Tensor) ArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgArgmaxOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Argmin calls lib.AtgArgmin; dim is an optional scalar encoded as a slice
// (empty = null).
func(ts *Tensor) Argmin(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgArgmin(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ArgminOut calls lib.AtgArgminOut with out as the destination tensor; dim is
// an optional scalar encoded as a slice (empty = null).
func(ts *Tensor) ArgminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgArgminOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Argsort calls lib.AtgArgsort; the descending flag is passed to C as an
// int32 (0/1).
func(ts *Tensor) Argsort(dim int64, descending bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cdescending := int32(0)
if descending { cdescending = int32(1) }
lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ArgsortStable calls lib.AtgArgsortStable; stable and descending are passed
// to C as int32 flags (0/1).
func(ts *Tensor) ArgsortStable(stable bool, dim int64, descending bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cstable := int32(0)
if stable { cstable = int32(1) }
cdescending := int32(0)
if descending { cdescending = int32(1) }
lib.AtgArgsortStable(ptr, ts.ctensor, cstable, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ArgsortStableOut calls lib.AtgArgsortStableOut with out as the destination
// tensor.
func(ts *Tensor) ArgsortStableOut(out *Tensor, stable bool, dim int64, descending bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cstable := int32(0)
if stable { cstable = int32(1) }
cdescending := int32(0)
if descending { cdescending = int32(1) }
lib.AtgArgsortStableOut(ptr, out.ctensor, ts.ctensor, cstable, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Argwhere calls lib.AtgArgwhere and returns the result as a new Tensor.
func(ts *Tensor) Argwhere(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArgwhere(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AsStrided calls lib.AtgAsStrided. size and stride are passed with their
// lengths; storageOffset is an optional scalar encoded as a slice (empty
// means "null", signalled with cstorageOffsetNull=1).
func(ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStrided(ptr, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AsStrided_ applies lib.AtgAsStrided_ in place, replacing ts.ctensor;
// storageOffset is an optional scalar encoded as a slice (empty = null).
func(ts *Tensor) AsStrided_(size []int64, stride []int64, storageOffset []int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStrided_(ptr, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AsStridedCopy calls lib.AtgAsStridedCopy; storageOffset is an optional
// scalar encoded as a slice (empty = null).
func(ts *Tensor) AsStridedCopy(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStridedCopy(ptr, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AsStridedCopyOut calls lib.AtgAsStridedCopyOut with out as the destination
// tensor; storageOffset is an optional scalar encoded as a slice.
func(ts *Tensor) AsStridedCopyOut(out *Tensor, size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStridedCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AsStridedScatter calls lib.AtgAsStridedScatter with src; storageOffset is
// an optional scalar encoded as a slice (empty = null).
func(ts *Tensor) AsStridedScatter(src *Tensor, size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStridedScatter(ptr, ts.ctensor, src.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AsStridedScatterOut calls lib.AtgAsStridedScatterOut with out as the
// destination tensor and src as the scatter source.
func(ts *Tensor) AsStridedScatterOut(out *Tensor, src *Tensor, size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStridedScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Asin calls lib.AtgAsin and returns the result as a new Tensor; del drops
// the receiver after the call.
func(ts *Tensor) Asin(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Asin_ applies lib.AtgAsin_ in place, replacing ts.ctensor.
func(ts *Tensor) Asin_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsin_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AsinOut calls lib.AtgAsinOut with out as the destination tensor.
func(ts *Tensor) AsinOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Asinh calls lib.AtgAsinh and returns the result as a new Tensor.
func(ts *Tensor) Asinh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Asinh_ applies lib.AtgAsinh_ in place, replacing ts.ctensor.
func(ts *Tensor) Asinh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AsinhOut calls lib.AtgAsinhOut with out as the destination tensor.
func(ts *Tensor) AsinhOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atan calls lib.AtgAtan and returns the result as a new Tensor.
func(ts *Tensor) Atan(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atan2 calls lib.AtgAtan2 with ts and other and returns a new Tensor.
func(ts *Tensor) Atan2(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan2(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atan2_ applies lib.AtgAtan2_ in place, replacing ts.ctensor.
func(ts *Tensor) Atan2_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Atan2Out calls lib.AtgAtan2Out with out as the destination tensor.
func(ts *Tensor) Atan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atan_ applies lib.AtgAtan_ in place, replacing ts.ctensor.
func(ts *Tensor) Atan_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AtanOut calls lib.AtgAtanOut with out as the destination tensor.
func(ts *Tensor) AtanOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atanh calls lib.AtgAtanh and returns the result as a new Tensor.
func(ts *Tensor) Atanh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atanh_ applies lib.AtgAtanh_ in place, replacing ts.ctensor.
func(ts *Tensor) Atanh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// AtanhOut calls lib.AtgAtanhOut with out as the destination tensor.
func(ts *Tensor) AtanhOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atleast1d calls lib.AtgAtleast1d and returns the result as a new Tensor.
func(ts *Tensor) Atleast1d(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtleast1d(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atleast2d calls lib.AtgAtleast2d and returns the result as a new Tensor.
func(ts *Tensor) Atleast2d(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtleast2d(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Atleast3d calls lib.AtgAtleast3d and returns the result as a new Tensor.
func(ts *Tensor) Atleast3d(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtleast3d(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool1d calls lib.AtgAvgPool1d. kernelSize, stride and padding are passed
// with their lengths; ceilMode and countIncludePad are passed as int32 flags.
func(ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool2d calls lib.AtgAvgPool2d. divisorOverride is an optional scalar
// encoded as a slice: empty means "null" (cdivisorOverrideNull=1).
func(ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool2dBackward calls lib.AtgAvgPool2dBackward; note gradOut precedes ts
// in the C call. divisorOverride is an optional scalar encoded as a slice.
func(ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool2dBackwardGradInput calls lib.AtgAvgPool2dBackwardGradInput with
// gradInput as the destination tensor.
func(ts *Tensor) AvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool2dOut calls lib.AtgAvgPool2dOut with out as the destination tensor.
func(ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool3d calls lib.AtgAvgPool3d; divisorOverride is an optional scalar
// encoded as a slice (empty = null).
func(ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool3dBackward calls lib.AtgAvgPool3dBackward; gradOut precedes ts in
// the C call.
func(ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool3dBackwardGradInput calls lib.AtgAvgPool3dBackwardGradInput with
// gradInput as the destination tensor.
func(ts *Tensor) AvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// AvgPool3dOut calls lib.AtgAvgPool3dOut with out as the destination tensor.
func(ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
ccountIncludePad := int32(0)
if countIncludePad { ccountIncludePad = int32(1) }
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Baddbmm calls lib.AtgBaddbmm with batch1 and batch2 and returns a new
// Tensor.
func(ts *Tensor) Baddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Baddbmm_ applies lib.AtgBaddbmm_ in place, replacing ts.ctensor.
func(ts *Tensor) Baddbmm_(batch1 *Tensor, batch2 *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BaddbmmOut calls lib.AtgBaddbmmOut with out as the destination tensor.
func(ts *Tensor) BaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BartlettWindow is a package-level constructor wrapping lib.AtgBartlettWindow;
// optionsKind/optionsDevice are converted to their C int representation.
func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BartlettWindowOut wraps lib.AtgBartlettWindowOut with out as the
// destination tensor.
func BartlettWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBartlettWindowOut(ptr, out.ctensor, windowLength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BartlettWindowPeriodic wraps lib.AtgBartlettWindowPeriodic; periodic is
// passed to C as an int32 flag (0/1).
func BartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgBartlettWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BartlettWindowPeriodicOut wraps lib.AtgBartlettWindowPeriodicOut with out
// as the destination tensor.
func BartlettWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgBartlettWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BatchNorm is a package-level wrapper around lib.AtgBatchNorm; training and
// cudnnEnabled are passed to C as int32 flags (0/1).
func BatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
ccudnnEnabled := int32(0)
if cudnnEnabled { ccudnnEnabled = int32(1) }
lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BatchNormBackwardElemt wraps lib.AtgBatchNormBackwardElemt, forwarding all
// tensor handles unchanged.
func BatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor, count.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BatchNormBackwardElemtOut wraps lib.AtgBatchNormBackwardElemtOut with out
// as the destination tensor.
func BatchNormBackwardElemtOut(out *Tensor, gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBatchNormBackwardElemtOut(ptr, out.ctensor, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor, count.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormBackwardReduce wraps lib.AtgBatchNormBackwardReduce, which writes
// four Ctensor handles into consecutive slots starting at ctensorPtr0; the
// three derived pointers address slots 1-3 by Ctensor-sized offsets.
// NOTE(review): the backing buffer is C.malloc(0) — a zero-size allocation —
// yet four pointer-sized slots are written through it. This appears to rely
// on the allocator's minimum chunk size; verify against the generator /
// libtch contract before touching this pattern.
func BatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cinputG := int32(0)
if inputG { cinputG = int32(1) }
cweightG := int32(0)
if weightG { cweightG = int32(1) }
cbiasG := int32(0)
if biasG { cbiasG = int32(1) }
lib.AtgBatchNormBackwardReduce(ctensorPtr0, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, cinputG, cweightG, cbiasG)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormBackwardReduceOut wraps lib.AtgBatchNormBackwardReduceOut with
// out0..out3 as destination tensors; results are read from four consecutive
// Ctensor slots starting at ctensorPtr0 (same layout caveat as above: the
// slots come from a single C.malloc(0) allocation).
func BatchNormBackwardReduceOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cinputG := int32(0)
if inputG { cinputG = int32(1) }
cweightG := int32(0)
if weightG { cweightG = int32(1) }
cbiasG := int32(0)
if biasG { cbiasG = int32(1) }
lib.AtgBatchNormBackwardReduceOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, cinputG, cweightG, cbiasG)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed 1`:
// --------------------------
// BatchNormElemt calls the C binding lib.AtgBatchNormElemt and wraps the
// tensor handle the C call writes back in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed —
// relies on allocator slack; fix belongs in the code generator.
func BatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BatchNormElemtOut calls the C binding lib.AtgBatchNormElemtOut (out-variant
// writing into `out`) and wraps the returned tensor handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormGatherStats calls the C binding lib.AtgBatchNormGatherStats, which
// writes two result-tensor handles into a caller-supplied array of
// pointer-sized Ctensor slots; each handle is wrapped in a new Go Tensor.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormGatherStats(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, count)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormGatherStatsOut calls the C binding lib.AtgBatchNormGatherStatsOut
// (out-variant writing into out0/out1), which writes two result-tensor handles
// into a caller-supplied array of pointer-sized Ctensor slots.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormGatherStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormGatherStatsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, count)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormGatherStatsWithCounts calls the C binding
// lib.AtgBatchNormGatherStatsWithCounts, which writes two result-tensor
// handles into a caller-supplied array of pointer-sized Ctensor slots.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormGatherStatsWithCounts(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, counts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormGatherStatsWithCountsOut calls the C binding
// lib.AtgBatchNormGatherStatsWithCountsOut (out-variant writing into
// out0/out1), which writes two result-tensor handles into a caller-supplied
// array of pointer-sized Ctensor slots.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormGatherStatsWithCountsOut(out0 *Tensor, out1 *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormGatherStatsWithCountsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, counts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormStats calls the C binding lib.AtgBatchNormStats, which writes two
// result-tensor handles into a caller-supplied array of pointer-sized Ctensor
// slots; each handle is wrapped in a new Go Tensor.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormStats(input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormStats(ctensorPtr0, input.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormStatsOut calls the C binding lib.AtgBatchNormStatsOut (out-variant
// writing into out0/out1), which writes two result-tensor handles into a
// caller-supplied array of pointer-sized Ctensor slots.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormStatsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormUpdateStats calls the C binding lib.AtgBatchNormUpdateStats, which
// writes two result-tensor handles into a caller-supplied array of
// pointer-sized Ctensor slots; each handle is wrapped in a new Go Tensor.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormUpdateStats(ctensorPtr0, input.ctensor, runningMean.ctensor, runningVar.ctensor, momentum)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// BatchNormUpdateStatsOut calls the C binding lib.AtgBatchNormUpdateStatsOut
// (out-variant writing into out0/out1), which writes two result-tensor handles
// into a caller-supplied array of pointer-sized Ctensor slots.
//
// Fix(review): the slot array was C.malloc(0) while the C call writes two
// pointer-sized slots — out-of-bounds on a zero-size allocation. Allocate the
// two slots explicitly and free the scratch array after copying the handles
// out (it previously leaked). Mirror this fix in the generator.
func BatchNormUpdateStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// One pointer-sized slot per returned tensor (the stride below assumes this).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormUpdateStatsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, runningMean.ctensor, runningVar.ctensor, momentum)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// Bernoulli calls the C binding lib.AtgBernoulli on the receiver and wraps the
// returned tensor handle in a new Go Tensor. When del is true the receiver is
// dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) Bernoulli(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulli(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Bernoulli_ calls the in-place C binding lib.AtgBernoulli_ and repoints the
// receiver's ctensor at the handle the C call writes back.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) Bernoulli_(p *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BernoulliFloat_ calls the in-place C binding lib.AtgBernoulliFloat_ with a
// float probability and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BernoulliFloat_(p float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulliFloat_(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BernoulliP calls the C binding lib.AtgBernoulliP with a float probability
// and wraps the returned handle in a new Go Tensor. When del is true the
// receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BernoulliP(p float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulliP(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BernoulliTensor calls the C binding lib.AtgBernoulliTensor with a tensor of
// probabilities and wraps the returned handle in a new Go Tensor. When del is
// true the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BernoulliTensor(p *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulliTensor(ptr, ts.ctensor, p.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Bilinear calls the C binding lib.AtgBilinear and wraps the returned tensor
// handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func Bilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BinaryCrossEntropy calls the C binding lib.AtgBinaryCrossEntropy on the
// receiver and wraps the returned handle in a new Go Tensor. When del is true
// the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BinaryCrossEntropyBackward calls the C binding
// lib.AtgBinaryCrossEntropyBackward (gradOutput first, then the receiver) and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BinaryCrossEntropyBackwardGradInput calls the C binding
// lib.AtgBinaryCrossEntropyBackwardGradInput (writing into gradInput) and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BinaryCrossEntropyOut calls the C binding lib.AtgBinaryCrossEntropyOut
// (out-variant writing into `out`) and wraps the returned handle in a new Go
// Tensor. When del is true the receiver is dropped (MustDrop) after the call.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BinaryCrossEntropyWithLogits calls the C binding
// lib.AtgBinaryCrossEntropyWithLogits on the receiver and wraps the returned
// handle in a new Go Tensor. When del is true the receiver is dropped
// (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BinaryCrossEntropyWithLogitsOut calls the C binding
// lib.AtgBinaryCrossEntropyWithLogitsOut (out-variant writing into `out`) and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BinaryCrossEntropyWithLogitsOut(out *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyWithLogitsOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Bincount calls the C binding lib.AtgBincount on the receiver and wraps the
// returned handle in a new Go Tensor. When del is true the receiver is
// dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) Bincount(weights *Tensor, minlength int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BincountOut calls the C binding lib.AtgBincountOut (out-variant writing into
// `out`) and wraps the returned handle in a new Go Tensor. When del is true
// the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BincountOut(out *Tensor, weights *Tensor, minlength int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBincountOut(ptr, out.ctensor, ts.ctensor, weights.ctensor, minlength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Binomial calls the C binding lib.AtgBinomial and wraps the returned tensor
// handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func Binomial(count *Tensor, prob *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinomial(ptr, count.ctensor, prob.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BinomialOut calls the C binding lib.AtgBinomialOut (out-variant writing into
// `out`) and wraps the returned tensor handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BinomialOut(out *Tensor, count *Tensor, prob *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinomialOut(ptr, out.ctensor, count.ctensor, prob.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAnd calls the C binding lib.AtgBitwiseAnd with a Scalar operand and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseAnd(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAnd_ calls the in-place C binding lib.AtgBitwiseAnd_ with a Scalar
// operand and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseAnd_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAndScalarOut calls the C binding lib.AtgBitwiseAndScalarOut
// (out-variant, Scalar operand) and wraps the returned handle in a new Go
// Tensor. When del is true the receiver is dropped (MustDrop) after the call.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseAndScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAndScalarTensor calls the C binding lib.AtgBitwiseAndScalarTensor
// (Scalar lhs, Tensor rhs) and wraps the returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseAndScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAndScalarTensorOut calls the C binding
// lib.AtgBitwiseAndScalarTensorOut (out-variant, Scalar lhs, Tensor rhs) and
// wraps the returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseAndScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAndTensor calls the C binding lib.AtgBitwiseAndTensor with a Tensor
// operand and wraps the returned handle in a new Go Tensor. When del is true
// the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseAndTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAndTensor_ calls the in-place C binding lib.AtgBitwiseAndTensor_ with
// a Tensor operand and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseAndTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseAndTensorOut calls the C binding lib.AtgBitwiseAndTensorOut
// (out-variant, Tensor operand) and wraps the returned handle in a new Go
// Tensor. When del is true the receiver is dropped (MustDrop) after the call.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseAndTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShift calls the C binding lib.AtgBitwiseLeftShift with a Tensor
// operand and wraps the returned handle in a new Go Tensor. When del is true
// the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseLeftShift(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShift(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShift_ calls the in-place C binding lib.AtgBitwiseLeftShift_ with
// a Tensor operand and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseLeftShift_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShift_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShiftScalarTensor calls the C binding
// lib.AtgBitwiseLeftShiftScalarTensor (Scalar lhs, Tensor rhs) and wraps the
// returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShiftScalarTensorOut calls the C binding
// lib.AtgBitwiseLeftShiftScalarTensorOut (out-variant, Scalar lhs, Tensor rhs)
// and wraps the returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseLeftShiftScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShiftTensorOut calls the C binding
// lib.AtgBitwiseLeftShiftTensorOut (out-variant, Tensor operand) and wraps the
// returned handle in a new Go Tensor. When del is true the receiver is
// dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShiftTensorScalar calls the C binding
// lib.AtgBitwiseLeftShiftTensorScalar with a Scalar operand and wraps the
// returned handle in a new Go Tensor. When del is true the receiver is
// dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseLeftShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShiftTensorScalar_ calls the in-place C binding
// lib.AtgBitwiseLeftShiftTensorScalar_ with a Scalar operand and repoints the
// receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseLeftShiftTensorScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseLeftShiftTensorScalarOut calls the C binding
// lib.AtgBitwiseLeftShiftTensorScalarOut (out-variant, Scalar operand) and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseNot calls the C binding lib.AtgBitwiseNot on the receiver and wraps
// the returned handle in a new Go Tensor. When del is true the receiver is
// dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseNot(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseNot(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseNot_ calls the in-place C binding lib.AtgBitwiseNot_ and repoints the
// receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseNot_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseNot_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseNotOut calls the C binding lib.AtgBitwiseNotOut (out-variant writing
// into `out`) and wraps the returned handle in a new Go Tensor. When del is
// true the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseNotOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOr calls the C binding lib.AtgBitwiseOr with a Scalar operand and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseOr(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOr_ calls the in-place C binding lib.AtgBitwiseOr_ with a Scalar
// operand and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseOr_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOrScalarOut calls the C binding lib.AtgBitwiseOrScalarOut
// (out-variant, Scalar operand) and wraps the returned handle in a new Go
// Tensor. When del is true the receiver is dropped (MustDrop) after the call.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseOrScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOrScalarTensor calls the C binding lib.AtgBitwiseOrScalarTensor
// (Scalar lhs, Tensor rhs) and wraps the returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseOrScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOrScalarTensorOut calls the C binding lib.AtgBitwiseOrScalarTensorOut
// (out-variant, Scalar lhs, Tensor rhs) and wraps the returned handle in a
// new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseOrScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOrTensor calls the C binding lib.AtgBitwiseOrTensor with a Tensor
// operand and wraps the returned handle in a new Go Tensor. When del is true
// the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseOrTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOrTensor_ calls the in-place C binding lib.AtgBitwiseOrTensor_ with a
// Tensor operand and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseOrTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseOrTensorOut calls the C binding lib.AtgBitwiseOrTensorOut
// (out-variant, Tensor operand) and wraps the returned handle in a new Go
// Tensor. When del is true the receiver is dropped (MustDrop) after the call.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseOrTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShift calls the C binding lib.AtgBitwiseRightShift with a Tensor
// operand and wraps the returned handle in a new Go Tensor. When del is true
// the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseRightShift(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShift(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShift_ calls the in-place C binding lib.AtgBitwiseRightShift_
// with a Tensor operand and repoints the receiver's ctensor at the written
// handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseRightShift_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShift_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShiftScalarTensor calls the C binding
// lib.AtgBitwiseRightShiftScalarTensor (Scalar lhs, Tensor rhs) and wraps the
// returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShiftScalarTensorOut calls the C binding
// lib.AtgBitwiseRightShiftScalarTensorOut (out-variant, Scalar lhs, Tensor
// rhs) and wraps the returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseRightShiftScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShiftTensorOut calls the C binding
// lib.AtgBitwiseRightShiftTensorOut (out-variant, Tensor operand) and wraps
// the returned handle in a new Go Tensor. When del is true the receiver is
// dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShiftTensorScalar calls the C binding
// lib.AtgBitwiseRightShiftTensorScalar with a Scalar operand and wraps the
// returned handle in a new Go Tensor. When del is true the receiver is
// dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseRightShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShiftTensorScalar_ calls the in-place C binding
// lib.AtgBitwiseRightShiftTensorScalar_ with a Scalar operand and repoints the
// receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseRightShiftTensorScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseRightShiftTensorScalarOut calls the C binding
// lib.AtgBitwiseRightShiftTensorScalarOut (out-variant, Scalar operand) and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXor calls the C binding lib.AtgBitwiseXor with a Scalar operand and
// wraps the returned handle in a new Go Tensor. When del is true the receiver
// is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseXor(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXor_ calls the in-place C binding lib.AtgBitwiseXor_ with a Scalar
// operand and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseXor_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXorScalarOut calls the C binding lib.AtgBitwiseXorScalarOut
// (out-variant, Scalar operand) and wraps the returned handle in a new Go
// Tensor. When del is true the receiver is dropped (MustDrop) after the call.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseXorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXorScalarTensor calls the C binding lib.AtgBitwiseXorScalarTensor
// (Scalar lhs, Tensor rhs) and wraps the returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseXorScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXorScalarTensorOut calls the C binding
// lib.AtgBitwiseXorScalarTensorOut (out-variant, Scalar lhs, Tensor rhs) and
// wraps the returned handle in a new Go Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BitwiseXorScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXorTensor calls the C binding lib.AtgBitwiseXorTensor with a Tensor
// operand and wraps the returned handle in a new Go Tensor. When del is true
// the receiver is dropped (MustDrop) once the call returns.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseXorTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXorTensor_ calls the in-place C binding lib.AtgBitwiseXorTensor_ with
// a Tensor operand and repoints the receiver's ctensor at the written handle.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseXorTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// BitwiseXorTensorOut calls the C binding lib.AtgBitwiseXorTensorOut
// (out-variant, Tensor operand) and wraps the returned handle in a new Go
// Tensor. When del is true the receiver is dropped (MustDrop) after the call.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func(ts *Tensor) BitwiseXorTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BlackmanWindow calls the C binding lib.AtgBlackmanWindow with the requested
// dtype/device (lowered via CInt) and wraps the returned handle in a new Go
// Tensor.
// NOTE(review): the one-slot result buffer is C.malloc(0) and never freed.
func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BlackmanWindowOut is a generated binding for lib.AtgBlackmanWindowOut; out is passed through to the C side.
func BlackmanWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBlackmanWindowOut(ptr, out.ctensor, windowLength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BlackmanWindowPeriodic is a generated binding for lib.AtgBlackmanWindowPeriodic.
func BlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgBlackmanWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BlackmanWindowPeriodicOut is a generated binding for lib.AtgBlackmanWindowPeriodicOut; out is passed through to the C side.
func BlackmanWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgBlackmanWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BlockDiag is a generated binding for lib.AtgBlockDiag over a variadic tensor list.
func BlockDiag(tensors []*Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgBlockDiag(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BlockDiagOut is a generated binding for lib.AtgBlockDiagOut; out is passed through to the C side.
func BlockDiagOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgBlockDiagOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Bmm is a generated binding for lib.AtgBmm; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Bmm(mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BmmOut is a generated binding for lib.AtgBmmOut; del=true drops the receiver on return.
func(ts *Tensor) BmmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BroadcastTo is a generated binding for lib.AtgBroadcastTo; size is passed as (slice, length) to C; del=true drops the receiver on return.
func(ts *Tensor) BroadcastTo(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgBroadcastTo(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Bucketize is a generated binding for lib.AtgBucketize; del=true drops the receiver on return.
func(ts *Tensor) Bucketize(boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bools encoded as C ints (1=true, 0=false)
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgBucketize(ptr, ts.ctensor, boundaries.ctensor, coutInt32, cright)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BucketizeScalar is a generated binding for lib.AtgBucketizeScalar (scalar self variant).
func BucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bools encoded as C ints (1=true, 0=false)
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgBucketizeScalar(ptr, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BucketizeScalarOut is a generated binding for lib.AtgBucketizeScalarOut; out is passed through to the C side.
func BucketizeScalarOut(out *Tensor, selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bools encoded as C ints (1=true, 0=false)
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgBucketizeScalarOut(ptr, out.ctensor, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// BucketizeTensorOut is a generated binding for lib.AtgBucketizeTensorOut; del=true drops the receiver on return.
func(ts *Tensor) BucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bools encoded as C ints (1=true, 0=false)
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgBucketizeTensorOut(ptr, out.ctensor, ts.ctensor, boundaries.ctensor, coutInt32, cright)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// CanCast is a generated binding for lib.AtgCanCast; it returns the C wrapper's bool result
// for the (from, to) dtype pair, with the usual TorchErr check afterwards.
func CanCast(from gotch.DType, to gotch.DType)(retVal bool, err error) {
retVal = lib.AtgCanCast(from.CInt(), to.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CartesianProd is a generated binding for lib.AtgCartesianProd over a tensor list.
func CartesianProd(tensors []*Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgCartesianProd(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cat is a generated binding for lib.AtgCat over a tensor list and a dimension index.
func Cat(tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgCat(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CatOut is a generated binding for lib.AtgCatOut; out is passed through to the C side.
func CatOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cauchy is a generated binding for lib.AtgCauchy; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Cauchy(median float64, sigma float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCauchy(ptr, ts.ctensor, median, sigma)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cauchy_ is a generated in-place binding for lib.AtgCauchy_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) Cauchy_(median float64, sigma float64)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCauchy_(ptr, ts.ctensor, median, sigma)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CauchyOut is a generated binding for lib.AtgCauchyOut; del=true drops the receiver on return.
func(ts *Tensor) CauchyOut(out *Tensor, median float64, sigma float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCauchyOut(ptr, out.ctensor, ts.ctensor, median, sigma)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CcolIndices is a generated binding for lib.AtgCcolIndices; del=true drops the receiver on return.
func(ts *Tensor) CcolIndices(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCcolIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CcolIndicesCopy is a generated binding for lib.AtgCcolIndicesCopy; del=true drops the receiver on return.
func(ts *Tensor) CcolIndicesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCcolIndicesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CcolIndicesCopyOut is a generated binding for lib.AtgCcolIndicesCopyOut; del=true drops the receiver on return.
func(ts *Tensor) CcolIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCcolIndicesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cdist is a generated binding for lib.AtgCdist. The optional computeMode is passed as a
// 0- or 1-element slice: empty means null/None on the C side.
func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// optional int64 encoded as a (value, null-flag) pair for C
var ccomputeModeVal int64 = 0
var ccomputeModeNull int = 1
if len(computeMode) > 0 {
ccomputeModeVal = computeMode[0]
ccomputeModeNull = 0
}
lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, ccomputeModeVal, ccomputeModeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ceil is a generated binding for lib.AtgCeil; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Ceil(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCeil(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ceil_ is a generated in-place binding for lib.AtgCeil_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) Ceil_()(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCeil_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CeilOut is a generated binding for lib.AtgCeilOut; del=true drops the receiver on return.
func(ts *Tensor) CeilOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Celu is a generated binding for lib.AtgCelu; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Celu(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Celu_ is a generated in-place binding for lib.AtgCelu_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) Celu_()(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CeluOut is a generated binding for lib.AtgCeluOut; del=true drops the receiver on return.
func(ts *Tensor) CeluOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCeluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ChainMatmul is a generated binding for lib.AtgChainMatmul over a tensor list.
func ChainMatmul(matrices []*Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var cmatrices []lib.Ctensor
for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)}
lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ChainMatmulOut is a generated binding for lib.AtgChainMatmulOut; out is passed through to the C side.
func ChainMatmulOut(out *Tensor, matrices []*Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var cmatrices []lib.Ctensor
for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)}
lib.AtgChainMatmulOut(ptr, out.ctensor, cmatrices, len(cmatrices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Chalf is a generated binding for lib.AtgChalf; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Chalf(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgChalf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ChannelShuffle is a generated binding for lib.AtgChannelShuffle; del=true drops the receiver on return.
func(ts *Tensor) ChannelShuffle(groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgChannelShuffle(ptr, ts.ctensor, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ChannelShuffleOut is a generated binding for lib.AtgChannelShuffleOut; del=true drops the receiver on return.
func(ts *Tensor) ChannelShuffleOut(out *Tensor, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgChannelShuffleOut(ptr, out.ctensor, ts.ctensor, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cholesky is a generated binding for lib.AtgCholesky; del=true drops the receiver on return.
func(ts *Tensor) Cholesky(upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgCholesky(ptr, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CholeskyInverse is a generated binding for lib.AtgCholeskyInverse; del=true drops the receiver on return.
func(ts *Tensor) CholeskyInverse(upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CholeskyInverseOut is a generated binding for lib.AtgCholeskyInverseOut; del=true drops the receiver on return.
func(ts *Tensor) CholeskyInverseOut(out *Tensor, upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CholeskyOut is a generated binding for lib.AtgCholeskyOut; del=true drops the receiver on return.
func(ts *Tensor) CholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CholeskySolve is a generated binding for lib.AtgCholeskySolve; del=true drops the receiver on return.
func(ts *Tensor) CholeskySolve(input2 *Tensor, upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CholeskySolveOut is a generated binding for lib.AtgCholeskySolveOut; del=true drops the receiver on return.
func(ts *Tensor) CholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// ChooseQparamsOptimized is a generated binding for lib.AtgChooseQparamsOptimized, a C call that
// returns two tensors; the second result slot sits one pointer-width past the first.
func ChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// base slot; the C call writes both result handles consecutively starting here
// NOTE(review): the backing C.malloc(0) allocation is nominally zero bytes yet holds two
// pointers — generator-wide pattern; relies on allocator slack, verify.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// second slot = base + sizeof(pointer)
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgChooseQparamsOptimized(ctensorPtr0, input.ctensor, numel, nBins, ratio, bitWidth)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// Clamp is a generated binding for lib.AtgClamp; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Clamp(min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Clamp_ is a generated in-place binding for lib.AtgClamp_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) Clamp_(min *Scalar, max *Scalar)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMax is a generated binding for lib.AtgClampMax; del=true drops the receiver on return.
func(ts *Tensor) ClampMax(max *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMax(ptr, ts.ctensor, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMax_ is a generated in-place binding for lib.AtgClampMax_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) ClampMax_(max *Scalar)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMaxOut is a generated binding for lib.AtgClampMaxOut; del=true drops the receiver on return.
func(ts *Tensor) ClampMaxOut(out *Tensor, max *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMaxTensor is a generated binding for lib.AtgClampMaxTensor (tensor max variant); del=true drops the receiver on return.
func(ts *Tensor) ClampMaxTensor(max *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxTensor(ptr, ts.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMaxTensor_ is a generated in-place binding for lib.AtgClampMaxTensor_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) ClampMaxTensor_(max *Tensor)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxTensor_(ptr, ts.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMaxTensorOut is a generated binding for lib.AtgClampMaxTensorOut; del=true drops the receiver on return.
func(ts *Tensor) ClampMaxTensorOut(out *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxTensorOut(ptr, out.ctensor, ts.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMin is a generated binding for lib.AtgClampMin; del=true drops the receiver on return.
func(ts *Tensor) ClampMin(min *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMin(ptr, ts.ctensor, min.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMin_ is a generated in-place binding for lib.AtgClampMin_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) ClampMin_(min *Scalar)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMinOut is a generated binding for lib.AtgClampMinOut; del=true drops the receiver on return.
func(ts *Tensor) ClampMinOut(out *Tensor, min *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMinTensor is a generated binding for lib.AtgClampMinTensor (tensor min variant); del=true drops the receiver on return.
func(ts *Tensor) ClampMinTensor(min *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinTensor(ptr, ts.ctensor, min.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMinTensor_ is a generated in-place binding for lib.AtgClampMinTensor_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) ClampMinTensor_(min *Tensor)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinTensor_(ptr, ts.ctensor, min.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampMinTensorOut is a generated binding for lib.AtgClampMinTensorOut; del=true drops the receiver on return.
func(ts *Tensor) ClampMinTensorOut(out *Tensor, min *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampOut is a generated binding for lib.AtgClampOut; del=true drops the receiver on return.
func(ts *Tensor) ClampOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampTensor is a generated binding for lib.AtgClampTensor (tensor bounds variant); del=true drops the receiver on return.
func(ts *Tensor) ClampTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampTensor(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampTensor_ is a generated in-place binding for lib.AtgClampTensor_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) ClampTensor_(min *Tensor, max *Tensor)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClampTensorOut is a generated binding for lib.AtgClampTensorOut; del=true drops the receiver on return.
func(ts *Tensor) ClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Clip is a generated binding for lib.AtgClip; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Clip(min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClip(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Clip_ is a generated in-place binding for lib.AtgClip_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) Clip_(min *Scalar, max *Scalar)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClip_(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClipOut is a generated binding for lib.AtgClipOut; del=true drops the receiver on return.
func(ts *Tensor) ClipOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClipTensor is a generated binding for lib.AtgClipTensor (tensor bounds variant); del=true drops the receiver on return.
func(ts *Tensor) ClipTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipTensor(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ClipTensor_ is a generated in-place binding for lib.AtgClipTensor_; on success the receiver's handle is replaced by the result.
func(ts *Tensor) ClipTensor_(min *Tensor, max *Tensor)(err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ClipTensorOut is a generated binding for lib.AtgClipTensorOut; del=true drops the receiver on return.
func(ts *Tensor) ClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Clone is a generated binding for lib.AtgClone; note the generator emits an out parameter
// here (passed first to the C wrapper); del=true drops the receiver on return.
func(ts *Tensor) Clone(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClone(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Coalesce is a generated binding for lib.AtgCoalesce; del=true drops (frees) the receiver when this call returns.
func(ts *Tensor) Coalesce(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCoalesce(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Col2im is a generated binding for lib.AtgCol2im; each []int64 argument is passed to C as a
// (slice, length) pair; del=true drops the receiver on return.
func(ts *Tensor) Col2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
kernelSizeLen := len(kernelSize)
dilationLen := len(dilation)
paddingLen := len(padding)
strideLen := len(stride)
lib.AtgCol2im(ptr, ts.ctensor, outputSize, outputSizeLen, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Col2imOut is a generated binding for lib.AtgCol2imOut; each []int64 argument is passed to C
// as a (slice, length) pair; del=true drops the receiver on return.
func(ts *Tensor) Col2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
kernelSizeLen := len(kernelSize)
dilationLen := len(dilation)
paddingLen := len(padding)
strideLen := len(stride)
lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ColIndices is a generated binding for lib.AtgColIndices; del=true drops the receiver on return.
func(ts *Tensor) ColIndices(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgColIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ColIndicesCopy is a generated binding for lib.AtgColIndicesCopy; del=true drops the receiver on return.
func(ts *Tensor) ColIndicesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgColIndicesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ColIndicesCopyOut is a generated binding for lib.AtgColIndicesCopyOut; del=true drops the receiver on return.
func(ts *Tensor) ColIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgColIndicesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ColumnStack is a generated binding for lib.AtgColumnStack over a tensor list.
func ColumnStack(tensors []*Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgColumnStack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ColumnStackOut is a generated binding for lib.AtgColumnStackOut; out is passed through to the C side.
func ColumnStackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgColumnStackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Combinations is a generated binding for lib.AtgCombinations; del=true drops the receiver on return.
func(ts *Tensor) Combinations(r int64, withReplacement bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Go bool encoded as C int (1=true, 0=false)
cwithReplacement := int32(0)
if withReplacement { cwithReplacement = int32(1) }
lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Complex is a generated binding for lib.AtgComplex taking real and imag part tensors.
func Complex(real *Tensor, imag *Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgComplex(ptr, real.ctensor, imag.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ComplexOut is a generated binding for lib.AtgComplexOut; out is passed through to the C side.
func ComplexOut(out *Tensor, real *Tensor, imag *Tensor)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgComplexOut(ptr, out.ctensor, real.ctensor, imag.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Concat is a generated binding for lib.AtgConcat over a tensor list and a dimension index.
func Concat(tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
// slot the C call writes the result tensor handle into
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// flatten []*Tensor into raw C handles for the wrapper
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgConcat(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConcatOut is the out-variant of Concat: the result is written through the
// caller-supplied `out` tensor via lib.AtgConcatOut.
func ConcatOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgConcatOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Concatenate wraps lib.AtgConcatenate over a slice of tensors along dim.
func Concatenate(tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgConcatenate(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConcatenateOut is the out-variant of Concatenate, writing through the
// caller-supplied `out` tensor via lib.AtgConcatenateOut.
func ConcatenateOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgConcatenateOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Conj wraps lib.AtgConj on the receiver. If del is true the receiver is
// dropped when the call returns.
func(ts *Tensor) Conj(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConj(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConjPhysical wraps lib.AtgConjPhysical on the receiver. If del is true the
// receiver is dropped when the call returns.
func(ts *Tensor) ConjPhysical(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConjPhysical(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConjPhysical_ is the in-place variant: it calls lib.AtgConjPhysical_ and
// replaces the receiver's underlying C tensor handle with the result.
func(ts *Tensor) ConjPhysical_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConjPhysical_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ConjPhysicalOut is the out-variant of ConjPhysical, writing through the
// caller-supplied `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) ConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConjPhysicalOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConstantPadNd wraps lib.AtgConstantPadNd; the pad slice is passed with an
// explicit length for the C side. If del is true the receiver is dropped.
func(ts *Tensor) ConstantPadNd(pad []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
padLen := len(pad)
lib.AtgConstantPadNd(ptr, ts.ctensor, pad, padLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConstantPadNdOut is the out-variant of ConstantPadNd, writing through the
// caller-supplied `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) ConstantPadNdOut(out *Tensor, pad []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
padLen := len(pad)
lib.AtgConstantPadNdOut(ptr, out.ctensor, ts.ctensor, pad, padLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Contiguous wraps lib.AtgContiguous on the receiver. If del is true the
// receiver is dropped when the call returns.
func(ts *Tensor) Contiguous(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgContiguous(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Conv1d wraps lib.AtgConv1d. Each int64 slice (stride, padding, dilation)
// is passed with an explicit length for the C side.
// NOTE(review): bias.ctensor is dereferenced unconditionally — a nil bias
// would panic here; presumably callers pass a defined (possibly null-handle)
// tensor. Confirm against the libtch binding.
func Conv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Conv1dPadding is the string-padding variant of Conv1d: padding is passed
// as a Go string to lib.AtgConv1dPadding instead of an int slice.
func Conv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
dilationLen := len(dilation)
lib.AtgConv1dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Conv2d wraps lib.AtgConv2d; mirrors Conv1d with the same slice/length
// marshalling for stride, padding and dilation.
func Conv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Conv2dPadding is the string-padding variant of Conv2d (padding passed as a
// Go string to lib.AtgConv2dPadding).
func Conv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
dilationLen := len(dilation)
lib.AtgConv2dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Conv3d wraps lib.AtgConv3d; mirrors Conv1d/Conv2d with the same
// slice/length marshalling for stride, padding and dilation.
func Conv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Conv3dPadding is the string-padding variant of Conv3d (padding passed as a
// Go string to lib.AtgConv3dPadding).
func Conv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
dilationLen := len(dilation)
lib.AtgConv3dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvDepthwise3d wraps lib.AtgConvDepthwise3d on the receiver; kernelSize,
// stride, padding and dilation slices each travel with an explicit length.
// If del is true the receiver is dropped when the call returns.
func(ts *Tensor) ConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgConvDepthwise3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvDepthwise3dOut is the out-variant of ConvDepthwise3d, writing through
// the caller-supplied `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) ConvDepthwise3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgConvDepthwise3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvTbc wraps lib.AtgConvTbc on the receiver with weight, bias and pad.
// If del is true the receiver is dropped when the call returns.
func(ts *Tensor) ConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// ConvTbcBackward wraps lib.AtgConvTbcBackward, which fills three contiguous
// result slots starting at ctensorPtr0; the Go side derives slot 1 and 2 by
// pointer arithmetic and wraps each handle in a Tensor.
// NOTE(review): the base slot comes from C.malloc(0) (a zero-byte
// allocation) yet three pointer-sized results are written past it — this
// relies on allocator slack and looks like a generator defect; the generator
// should allocate 3*sizeof(Ctensor). Confirm against libtch's contract.
func(ts *Tensor) ConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgConvTbcBackward(ctensorPtr0, ts.ctensor, input.ctensor, weight.ctensor, bias.ctensor, pad)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvTbcOut is the out-variant of ConvTbc, writing through the
// caller-supplied `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) ConvTbcOut(out *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConvTbcOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, pad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvTranspose1d wraps lib.AtgConvTranspose1d; stride, padding,
// outputPadding and dilation slices each travel with an explicit length.
func ConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
dilationLen := len(dilation)
lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, groups, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvTranspose2d wraps lib.AtgConvTranspose2d; same marshalling pattern as
// ConvTranspose1d.
func ConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
dilationLen := len(dilation)
lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, groups, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvTranspose3d wraps lib.AtgConvTranspose3d; same marshalling pattern as
// ConvTranspose1d/2d.
func ConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
dilationLen := len(dilation)
lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, groups, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Convolution wraps lib.AtgConvolution, the generic convolution entry point.
// The transposed flag is lowered to a C int (0/1); each int64 slice travels
// with an explicit length.
func Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
ctransposed := int32(0)
if transposed { ctransposed = int32(1) }
outputPaddingLen := len(outputPadding)
lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvolutionOut is the out-variant of Convolution, writing through the
// caller-supplied `out` tensor via lib.AtgConvolutionOut.
func ConvolutionOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
ctransposed := int32(0)
if transposed { ctransposed = int32(1) }
outputPaddingLen := len(outputPadding)
lib.AtgConvolutionOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvolutionOverrideable wraps lib.AtgConvolutionOverrideable; same
// marshalling pattern as Convolution.
func ConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
ctransposed := int32(0)
if transposed { ctransposed = int32(1) }
outputPaddingLen := len(outputPadding)
lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ConvolutionOverrideableOut is the out-variant of ConvolutionOverrideable,
// writing through the caller-supplied `out` tensor.
func ConvolutionOverrideableOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
ctransposed := int32(0)
if transposed { ctransposed = int32(1) }
outputPaddingLen := len(outputPadding)
lib.AtgConvolutionOverrideableOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Copy wraps lib.AtgCopy on the receiver with src; nonBlocking is lowered to
// a C int (0/1). If del is true the receiver is dropped after the call.
func(ts *Tensor) Copy(src *Tensor, nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.AtgCopy(ptr, ts.ctensor, src.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CopyOut is the out-variant of Copy, writing through the caller-supplied
// `out` tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) CopyOut(out *Tensor, src *Tensor, nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.AtgCopyOut(ptr, out.ctensor, ts.ctensor, src.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CopySparseToSparse wraps lib.AtgCopySparseToSparse on the receiver with
// src; nonBlocking is lowered to a C int (0/1). If del is true the receiver
// is dropped after the call.
func(ts *Tensor) CopySparseToSparse(src *Tensor, nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.AtgCopySparseToSparse(ptr, ts.ctensor, src.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CopySparseToSparse_ is the in-place variant: it calls
// lib.AtgCopySparseToSparse_ and replaces the receiver's underlying C tensor
// handle with the result.
func(ts *Tensor) CopySparseToSparse_(src *Tensor, nonBlocking bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CopySparseToSparseOut is the out-variant of CopySparseToSparse, writing
// through the caller-supplied `out` tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) CopySparseToSparseOut(out *Tensor, src *Tensor, nonBlocking bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
lib.AtgCopySparseToSparseOut(ptr, out.ctensor, ts.ctensor, src.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Copysign wraps lib.AtgCopysign on the receiver with a tensor operand.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) Copysign(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysign(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Copysign_ is the in-place variant of Copysign: it replaces the receiver's
// underlying C tensor handle with the result.
func(ts *Tensor) Copysign_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysign_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CopysignOut is the out-variant of Copysign, writing through the
// caller-supplied `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) CopysignOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CopysignScalar is the scalar-operand variant of Copysign, wrapping
// lib.AtgCopysignScalar. If del is true the receiver is dropped.
func(ts *Tensor) CopysignScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CopysignScalar_ is the in-place scalar-operand variant of Copysign: it
// replaces the receiver's underlying C tensor handle with the result.
func(ts *Tensor) CopysignScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CopysignScalarOut is the out-variant of CopysignScalar, writing through
// the caller-supplied `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) CopysignScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Corrcoef wraps lib.AtgCorrcoef on the receiver. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) Corrcoef(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCorrcoef(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cos wraps lib.AtgCos on the receiver. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) Cos(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCos(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cos_ is the in-place variant of Cos: it replaces the receiver's underlying
// C tensor handle with the result.
func(ts *Tensor) Cos_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCos_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CosOut is the out-variant of Cos, writing through the caller-supplied
// `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) CosOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cosh wraps lib.AtgCosh on the receiver. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) Cosh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cosh_ is the in-place variant of Cosh: it replaces the receiver's
// underlying C tensor handle with the result.
func(ts *Tensor) Cosh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CoshOut is the out-variant of Cosh, writing through the caller-supplied
// `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) CoshOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CosineEmbeddingLoss wraps lib.AtgCosineEmbeddingLoss over two inputs and a
// target with margin and reduction passed through verbatim.
func CosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CosineSimilarity wraps lib.AtgCosineSimilarity over x1 and x2 with dim and
// eps passed through verbatim.
func CosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CountNonzero wraps lib.AtgCountNonzero. `dim` encodes an optional scalar:
// an empty slice means "no dim" (cdimNull=1); otherwise only dim[0] is used.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) CountNonzero(dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// optional-int64 marshalling: value + null flag
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgCountNonzero(ptr, ts.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CountNonzeroDimIntlist is the int-list variant of CountNonzero: the whole
// dim slice is passed (with explicit length) rather than an optional scalar.
func(ts *Tensor) CountNonzeroDimIntlist(dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.AtgCountNonzeroDimIntlist(ptr, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CountNonzeroDimIntlistOut is the out-variant of CountNonzeroDimIntlist,
// writing through the caller-supplied `out` tensor.
func(ts *Tensor) CountNonzeroDimIntlistOut(out *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.AtgCountNonzeroDimIntlistOut(ptr, out.ctensor, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CountNonzeroOut is the out-variant of CountNonzero, with the same
// optional-scalar encoding of `dim` (empty slice means "no dim").
func(ts *Tensor) CountNonzeroOut(out *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// optional-int64 marshalling: value + null flag
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgCountNonzeroOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cov wraps lib.AtgCov on the receiver with correction, fweights and
// aweights. If del is true the receiver is dropped after the call.
func(ts *Tensor) Cov(correction int64, fweights *Tensor, aweights *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCov(ptr, ts.ctensor, correction, fweights.ctensor, aweights.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cross wraps lib.AtgCross. `dim` encodes an optional scalar: an empty slice
// means "no dim" (cdimNull=1); otherwise only dim[0] is used. If del is true
// the receiver is dropped after the call.
func(ts *Tensor) Cross(other *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// optional-int64 marshalling: value + null flag
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgCross(ptr, ts.ctensor, other.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CrossEntropyLoss wraps lib.AtgCrossEntropyLoss on the receiver with
// target, weight, reduction, ignoreIndex and labelSmoothing passed through
// verbatim. If del is true the receiver is dropped after the call.
func(ts *Tensor) CrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCrossEntropyLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, labelSmoothing)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CrossOut is the out-variant of Cross, with the same optional-scalar
// encoding of `dim` (empty slice means "no dim").
func(ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// optional-int64 marshalling: value + null flag
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CrowIndices wraps lib.AtgCrowIndices on the receiver. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) CrowIndices(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCrowIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CrowIndicesCopy wraps lib.AtgCrowIndicesCopy on the receiver. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) CrowIndicesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCrowIndicesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CrowIndicesCopyOut is the out-variant of CrowIndicesCopy, writing through
// the caller-supplied `out` tensor. If del is true the receiver is dropped.
func(ts *Tensor) CrowIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCrowIndicesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CtcLoss wraps lib.AtgCtcLoss; input/target lengths are int64 slices with
// explicit lengths, and zeroInfinity is lowered to a C int (0/1).
func CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputLengthsLen := len(inputLengths)
targetLengthsLen := len(targetLengths)
czeroInfinity := int32(0)
if zeroInfinity { czeroInfinity = int32(1) }
lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, reduction, czeroInfinity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CtcLossTensor is the tensor-lengths variant of CtcLoss: inputLengths and
// targetLengths are tensors rather than int64 slices.
func CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
czeroInfinity := int32(0)
if zeroInfinity { czeroInfinity = int32(1) }
lib.AtgCtcLossTensor(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnAffineGridGenerator wraps lib.AtgCudnnAffineGridGenerator over theta
// with the n/c/h/w sizes passed through verbatim.
func CudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnAffineGridGeneratorBackward wraps
// lib.AtgCudnnAffineGridGeneratorBackward over grad with n/c/h/w sizes.
func CudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnAffineGridGeneratorBackwardOut is the out-variant of
// CudnnAffineGridGeneratorBackward, writing through the caller-supplied
// `out` tensor.
func CudnnAffineGridGeneratorBackwardOut(out *Tensor, grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnAffineGridGeneratorBackwardOut(ptr, out.ctensor, grad.ctensor, n, c, h, w)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnAffineGridGeneratorOut is the out-variant of
// CudnnAffineGridGenerator, writing through the caller-supplied `out`
// tensor.
func CudnnAffineGridGeneratorOut(out *Tensor, theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnAffineGridGeneratorOut(ptr, out.ctensor, theta.ctensor, n, c, h, w)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CudnnBatchNorm wraps lib.AtgCudnnBatchNorm, which fills four consecutive C tensor
// slots starting at ctensorPtr0; each slot is wrapped in its own *Tensor on success.
// The Go bool `training` is converted to a C int (0/1) before the call.
func CudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.AtgCudnnBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CudnnBatchNormBackward wraps lib.AtgCudnnBatchNormBackward, which fills three
// consecutive C tensor slots starting at ctensorPtr0; each slot is wrapped in its own
// *Tensor on success.
func CudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCudnnBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon, reserveSpace.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CudnnBatchNormBackwardOut is the `out` variant of CudnnBatchNormBackward: the three
// destination handles out0..out2 are forwarded ahead of the inputs, and the three
// result slots filled by the C call are wrapped in new *Tensor values.
func CudnnBatchNormBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCudnnBatchNormBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon, reserveSpace.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CudnnBatchNormOut is the `out` variant of CudnnBatchNorm: four destination handles
// out0..out3 are forwarded ahead of the inputs, `training` is converted to a C int
// (0/1), and the four result slots filled by the C call are wrapped in new *Tensor values.
func CudnnBatchNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.AtgCudnnBatchNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolution wraps lib.AtgCudnnConvolution on ts. Slice arguments are passed with
// their lengths (the C side expects ptr+len pairs) and each Go bool is converted to a
// C int (0/1). If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) CudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
callowTf32 := int32(0)
if allowTf32 { callowTf32 = int32(1) }
lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolutionAddRelu wraps lib.AtgCudnnConvolutionAddRelu on ts, passing each
// int64 slice with its length. If del is true, ts is dropped (MustDrop) when this
// method returns.
func(ts *Tensor) CudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgCudnnConvolutionAddRelu(ptr, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolutionAddReluOut is the `out` variant of CudnnConvolutionAddRelu: out's
// handle is forwarded ahead of ts's. If del is true, ts is dropped (MustDrop) when
// this method returns.
func(ts *Tensor) CudnnConvolutionAddReluOut(out *Tensor, weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgCudnnConvolutionAddReluOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolutionOut is the `out` variant of CudnnConvolution: out's handle is
// forwarded ahead of ts's, slices are passed as ptr+len pairs, and bools become C
// ints (0/1). If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) CudnnConvolutionOut(out *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
callowTf32 := int32(0)
if allowTf32 { callowTf32 = int32(1) }
lib.AtgCudnnConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolutionRelu wraps lib.AtgCudnnConvolutionRelu on ts, passing each int64
// slice with its length. If del is true, ts is dropped (MustDrop) when this method
// returns.
func(ts *Tensor) CudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgCudnnConvolutionRelu(ptr, ts.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolutionReluOut is the `out` variant of CudnnConvolutionRelu: out's handle
// is forwarded ahead of ts's. If del is true, ts is dropped (MustDrop) when this
// method returns.
func(ts *Tensor) CudnnConvolutionReluOut(out *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgCudnnConvolutionReluOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolutionTranspose wraps lib.AtgCudnnConvolutionTranspose on ts. Slices are
// passed as ptr+len pairs and each Go bool is converted to a C int (0/1). If del is
// true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) CudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
strideLen := len(stride)
dilationLen := len(dilation)
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
callowTf32 := int32(0)
if allowTf32 { callowTf32 = int32(1) }
lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnConvolutionTransposeOut is the `out` variant of CudnnConvolutionTranspose:
// out's handle is forwarded ahead of ts's. If del is true, ts is dropped (MustDrop)
// when this method returns.
func(ts *Tensor) CudnnConvolutionTransposeOut(out *Tensor, weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
strideLen := len(stride)
dilationLen := len(dilation)
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
callowTf32 := int32(0)
if allowTf32 { callowTf32 = int32(1) }
lib.AtgCudnnConvolutionTransposeOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnGridSampler wraps lib.AtgCudnnGridSampler on ts with grid's handle. If del is
// true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) CudnnGridSampler(grid *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CudnnGridSamplerBackward wraps lib.AtgCudnnGridSamplerBackward, which fills two
// consecutive C tensor slots starting at ctensorPtr0; each is wrapped in its own
// *Tensor on success. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) CudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
lib.AtgCudnnGridSamplerBackward(ctensorPtr0, ts.ctensor, grid.ctensor, gradOutput.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CudnnGridSamplerBackwardOut is the `out` variant of CudnnGridSamplerBackward: out0
// and out1 handles are forwarded ahead of ts's, and the two result slots filled by the
// C call are wrapped in new *Tensor values. If del is true, ts is dropped afterwards.
func(ts *Tensor) CudnnGridSamplerBackwardOut(out0 *Tensor, out1 *Tensor, grid *Tensor, gradOutput *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
lib.AtgCudnnGridSamplerBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, grid.ctensor, gradOutput.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// CudnnGridSamplerOut is the `out` variant of CudnnGridSampler: out's handle is
// forwarded ahead of ts's. If del is true, ts is dropped (MustDrop) when this method
// returns.
func(ts *Tensor) CudnnGridSamplerOut(out *Tensor, grid *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCudnnGridSamplerOut(ptr, out.ctensor, ts.ctensor, grid.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// CudnnIsAcceptable returns the bool produced by lib.AtgCudnnIsAcceptable for ts
// (no C out-slot is needed for a scalar return). If del is true, ts is dropped
// (MustDrop) when this method returns.
func(ts *Tensor) CudnnIsAcceptable(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgCudnnIsAcceptable(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Cummax wraps lib.AtgCummax on ts along dim; the C call fills two consecutive tensor
// slots starting at ctensorPtr0, each wrapped in its own *Tensor on success. If del is
// true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Cummax(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
lib.AtgCummax(ctensorPtr0, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CummaxOut is the `out` variant of Cummax: the destination handles values and indices
// are forwarded ahead of ts's, and the two result slots filled by the C call are
// wrapped in new *Tensor values. If del is true, ts is dropped afterwards.
func(ts *Tensor) CummaxOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
lib.AtgCummaxOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// CummaxminBackward wraps lib.AtgCummaxminBackward with the raw handles of grad,
// input and indices plus dim, and returns the resulting C tensor handle wrapped in a
// new *Tensor; err reflects the libtorch error state reported by TorchErr.
func CummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCummaxminBackward(ptr, grad.ctensor, input.ctensor, indices.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Cummin wraps lib.AtgCummin on ts along dim; the C call fills two consecutive tensor
// slots starting at ctensorPtr0, each wrapped in its own *Tensor on success. If del is
// true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Cummin(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
lib.AtgCummin(ctensorPtr0, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// CumminOut is the `out` variant of Cummin: the destination handles values and indices
// are forwarded ahead of ts's, and the two result slots filled by the C call are
// wrapped in new *Tensor values. If del is true, ts is dropped afterwards.
func(ts *Tensor) CumminOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // base out-slot array; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0))) // NOTE(review): offsets past the zero-byte allocation above — appears to rely on allocator slack; confirm against the binding generator
lib.AtgCumminOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cumprod wraps lib.AtgCumprod on ts along dim, passing dtype as its C integer code
// (dtype.CInt()). If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cumprod_ is the in-place variant of Cumprod: it calls lib.AtgCumprod_ and replaces
// ts.ctensor with the handle the C call wrote back, instead of returning a new *Tensor.
func(ts *Tensor) Cumprod_(dim int64, dtype gotch.DType)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumprod_(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CumprodBackward wraps lib.AtgCumprodBackward with the raw handles of grad, input and
// output plus dim, and returns the resulting C tensor handle wrapped in a new *Tensor.
func CumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumprodBackward(ptr, grad.ctensor, input.ctensor, dim, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CumprodOut is the `out` variant of Cumprod: out's handle is forwarded ahead of ts's
// and dtype is passed as its C integer code. If del is true, ts is dropped afterwards.
func(ts *Tensor) CumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumprodOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cumsum wraps lib.AtgCumsum on ts along dim, passing dtype as its C integer code
// (dtype.CInt()). If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Cumsum(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Cumsum_ is the in-place variant of Cumsum: it calls lib.AtgCumsum_ and replaces
// ts.ctensor with the handle the C call wrote back, instead of returning a new *Tensor.
func(ts *Tensor) Cumsum_(dim int64, dtype gotch.DType)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumsum_(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// CumsumOut is the `out` variant of Cumsum: out's handle is forwarded ahead of ts's
// and dtype is passed as its C integer code. If del is true, ts is dropped afterwards.
func(ts *Tensor) CumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CumulativeTrapezoid wraps lib.AtgCumulativeTrapezoid with y's raw handle and dim,
// and returns the resulting C tensor handle wrapped in a new *Tensor.
func CumulativeTrapezoid(y *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumulativeTrapezoid(ptr, y.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// CumulativeTrapezoidX is the variant of CumulativeTrapezoid that also forwards the
// handle of x; the result handle is wrapped in a new *Tensor.
func CumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgCumulativeTrapezoidX(ptr, y.ctensor, x.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Data wraps lib.AtgData on ts and returns the resulting C tensor handle wrapped in a
// new *Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Data(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgData(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Deg2rad wraps lib.AtgDeg2rad on ts and returns the result wrapped in a new *Tensor.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Deg2rad(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDeg2rad(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Deg2rad_ is the in-place variant of Deg2rad: it calls lib.AtgDeg2rad_ and replaces
// ts.ctensor with the handle the C call wrote back.
func(ts *Tensor) Deg2rad_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDeg2rad_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Deg2radOut is the `out` variant of Deg2rad: out's handle is forwarded ahead of ts's.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Deg2radOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDeg2radOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `int64`:
// --------------------------
// DenseDim returns the int64 produced by lib.AtgDenseDim for ts (scalar return, so no
// C out-slot is needed). If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DenseDim(del bool)(retVal int64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgDenseDim(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Dequantize wraps lib.AtgDequantize on ts and returns the result wrapped in a new
// *Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Dequantize(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDequantize(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Det wraps lib.AtgDet on ts and returns the result wrapped in a new *Tensor.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Det(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Detach wraps lib.AtgDetach on ts and returns the result wrapped in a new *Tensor.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Detach(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDetach(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Detach_ is the in-place variant of Detach: it calls lib.AtgDetach_ and replaces
// ts.ctensor with the handle the C call wrote back.
func(ts *Tensor) Detach_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDetach_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// DetachCopy wraps lib.AtgDetachCopy on ts and returns the result wrapped in a new
// *Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DetachCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDetachCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DetachCopyOut is the `out` variant of DetachCopy: out's handle is forwarded ahead of
// ts's. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DetachCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDetachCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Diag wraps lib.AtgDiag on ts with the diagonal offset and returns the result wrapped
// in a new *Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Diag(diagonal int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiag(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagEmbed wraps lib.AtgDiagEmbed on ts with offset/dim1/dim2 and returns the result
// wrapped in a new *Tensor. If del is true, ts is dropped (MustDrop) afterwards.
func(ts *Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagEmbedOut is the `out` variant of DiagEmbed: out's handle is forwarded ahead of
// ts's. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DiagEmbedOut(out *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagEmbedOut(ptr, out.ctensor, ts.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagOut is the `out` variant of Diag: out's handle is forwarded ahead of ts's.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DiagOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Diagflat wraps lib.AtgDiagflat on ts with the offset and returns the result wrapped
// in a new *Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Diagflat(offset int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagflat(ptr, ts.ctensor, offset)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Diagonal wraps lib.AtgDiagonal on ts with offset/dim1/dim2 and returns the result
// wrapped in a new *Tensor. If del is true, ts is dropped (MustDrop) afterwards.
func(ts *Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagonalBackward wraps lib.AtgDiagonalBackward, passing the inputSizes slice with
// its length (the C side expects a ptr+len pair), and returns the result wrapped in a
// new *Tensor.
func DiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
inputSizesLen := len(inputSizes)
lib.AtgDiagonalBackward(ptr, gradOutput.ctensor, inputSizes, inputSizesLen, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagonalBackwardOut is the `out` variant of DiagonalBackward: out's handle is
// forwarded ahead of gradOutput's, and inputSizes is passed as a ptr+len pair.
func DiagonalBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
inputSizesLen := len(inputSizes)
lib.AtgDiagonalBackwardOut(ptr, out.ctensor, gradOutput.ctensor, inputSizes, inputSizesLen, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagonalCopy wraps lib.AtgDiagonalCopy on ts with offset/dim1/dim2 and returns the
// result wrapped in a new *Tensor. If del is true, ts is dropped (MustDrop) afterwards.
func(ts *Tensor) DiagonalCopy(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagonalCopy(ptr, ts.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagonalCopyOut is the `out` variant of DiagonalCopy: out's handle is forwarded
// ahead of ts's. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DiagonalCopyOut(out *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagonalCopyOut(ptr, out.ctensor, ts.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagonalScatter wraps lib.AtgDiagonalScatter on ts with src's handle and
// offset/dim1/dim2. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DiagonalScatter(src *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagonalScatter(ptr, ts.ctensor, src.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiagonalScatterOut is the `out` variant of DiagonalScatter: out's handle is
// forwarded ahead of ts's. If del is true, ts is dropped (MustDrop) afterwards.
func(ts *Tensor) DiagonalScatterOut(out *Tensor, src *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiagonalScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Diff wraps lib.AtgDiff on ts with n, dim, and the handles of prepend/append.
// NOTE(review): prepend.ctensor/append.ctensor are dereferenced unconditionally —
// callers appear to be expected to pass non-nil *Tensor values; confirm the optional-
// tensor convention used by callers. If del is true, ts is dropped afterwards.
func(ts *Tensor) Diff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiff(ptr, ts.ctensor, n, dim, prepend.ctensor, append.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DiffOut is the `out` variant of Diff: out's handle is forwarded ahead of ts's.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiffOut(ptr, out.ctensor, ts.ctensor, n, dim, prepend.ctensor, append.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Digamma wraps lib.AtgDigamma on ts and returns the result wrapped in a new *Tensor.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Digamma(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDigamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Digamma_ is the in-place variant of Digamma: it calls lib.AtgDigamma_ and replaces
// ts.ctensor with the handle the C call wrote back.
func(ts *Tensor) Digamma_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDigamma_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// DigammaOut is the `out` variant of Digamma: out's handle is forwarded ahead of ts's.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DigammaOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Dist wraps lib.AtgDist on ts with other's handle and returns the result wrapped in a
// new *Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Dist(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDist(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DistOut is the `out` variant of Dist: out's handle is forwarded ahead of ts's.
// If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) DistOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDistOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Div wraps lib.AtgDiv on ts with other's handle and returns the result wrapped in a
// new *Tensor. If del is true, ts is dropped (MustDrop) when this method returns.
func(ts *Tensor) Div(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) // out-slot the C call writes the result handle into; NOTE(review): zero-byte C.malloc, never freed — generator-level concern
lib.AtgDiv(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Div_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiv_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) DivOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) DivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) DivScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) DivScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// DivScalarMode divides ts by a scalar with the given rounding mode,
// returning a new tensor (del drops ts). The generated wrappers in this file
// all use the unfreed `C.malloc(0)` result-slot pattern — see generator.
func(ts *Tensor) DivScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivScalarMode_ divides ts by a scalar with a rounding mode, in place.
func(ts *Tensor) DivScalarMode_(other *Scalar, roundingMode string)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// DivScalarModeOut is the out= variant of DivScalarMode (del drops ts).
func(ts *Tensor) DivScalarModeOut(out *Tensor, other *Scalar, roundingMode string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalarModeOut(ptr, out.ctensor, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivScalarOut is the out= variant of DivScalar (del drops ts).
func(ts *Tensor) DivScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivTensorMode divides ts by other with a rounding mode (del drops ts).
func(ts *Tensor) DivTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivTensorMode_ divides ts by other with a rounding mode, in place.
func(ts *Tensor) DivTensorMode_(other *Tensor, roundingMode string)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Divide wraps ATen `divide` (alias family of `div`); del drops ts.
func(ts *Tensor) Divide(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivide(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Divide_ divides ts by other in place.
func(ts *Tensor) Divide_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivide_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideOut is the out= variant of Divide (del drops ts).
func(ts *Tensor) DivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideOutMode divides with a rounding mode into out (del drops ts).
func(ts *Tensor) DivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideScalar divides ts by a scalar (del drops ts).
func(ts *Tensor) DivideScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideScalar_ divides ts by a scalar in place.
func(ts *Tensor) DivideScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideScalarMode divides ts by a scalar with a rounding mode (del drops ts).
func(ts *Tensor) DivideScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideScalarMode_ divides ts by a scalar with a rounding mode, in place.
func(ts *Tensor) DivideScalarMode_(other *Scalar, roundingMode string)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideTensorMode divides ts by other with a rounding mode (del drops ts).
func(ts *Tensor) DivideTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DivideTensorMode_ divides ts by other with a rounding mode, in place.
func(ts *Tensor) DivideTensorMode_(other *Tensor, roundingMode string)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Dot wraps ATen `dot` on (ts, tensor); returns a new tensor (del drops ts).
func(ts *Tensor) Dot(tensor *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDot(ptr, ts.ctensor, tensor.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DotOut is the out= variant of Dot (del drops ts).
func(ts *Tensor) DotOut(out *Tensor, tensor *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Dropout wraps ATen `dropout`; train is marshalled to a C int flag
// (0/1) before the call.
func Dropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Dropout_ applies ATen `dropout_` to ts in place.
func(ts *Tensor) Dropout_(p float64, train bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Dstack wraps ATen `dstack`: the Go tensor slice is flattened into a
// []lib.Ctensor and passed with its length.
func Dstack(tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgDstack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// DstackOut is the out= variant of Dstack.
func DstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgDstackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Einsum wraps ATen `einsum`; equation, the flattened tensor list and the
// contraction path (with its length) are forwarded to the C layer.
func Einsum(equation string, tensors []*Tensor, path []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
pathLen := len(path)
lib.AtgEinsum(ptr, equation, ctensors, len(ctensors), path, pathLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Elu wraps ATen `elu` on ts; returns a new tensor (del drops ts).
func(ts *Tensor) Elu(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgElu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Elu_ applies ATen `elu_` to ts in place.
func(ts *Tensor) Elu_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgElu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// EluBackward wraps ATen `elu_backward`. isResult is marshalled to a
// C int flag; selfOrResult is the forward input or output accordingly
// (per the ATen signature — semantics decided by the C side).
func EluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cisResult := int32(0)
if isResult { cisResult = int32(1) }
lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EluBackwardGradInput is the grad_input out= variant of EluBackward.
func EluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cisResult := int32(0)
if isResult { cisResult = int32(1) }
lib.AtgEluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EluOut is the out= variant of Elu (del drops ts).
func(ts *Tensor) EluOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Embedding wraps ATen `embedding`, looking up rows of weight by indices.
// Bool flags are marshalled to C int 0/1 before the call.
func Embedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingBackward wraps ATen `embedding_backward`.
func EmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// EmbeddingBag wraps ATen `embedding_bag`, which produces four tensors.
// The C side writes four consecutive tensor handles starting at ctensorPtr0;
// the three sibling pointers are derived by pointer arithmetic.
// NOTE(review): the base slot comes from `C.malloc(0)` yet four pointers are
// written after it — allocation looks undersized; verify against the
// generator/libtch before relying on this by hand.
func EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
cincludeLastOffset := int32(0)
if includeLastOffset { cincludeLastOffset = int32(1) }
lib.AtgEmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// EmbeddingBagPaddingIdx is EmbeddingBag with an optional paddingIdx: an
// empty slice means "null" (cpaddingIdxNull=1); the first element, if any,
// supplies the value. Same four-slot result layout as EmbeddingBag.
func EmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
cincludeLastOffset := int32(0)
if includeLastOffset { cincludeLastOffset = int32(1) }
var cpaddingIdxVal int64 = 0
var cpaddingIdxNull int = 1
if len(paddingIdx) > 0 {
cpaddingIdxVal = paddingIdx[0]
cpaddingIdxNull = 0
}
lib.AtgEmbeddingBagPaddingIdx(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, cpaddingIdxVal, cpaddingIdxNull)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingDenseBackward wraps ATen `embedding_dense_backward`.
func EmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingDenseBackwardOut is the out= variant of EmbeddingDenseBackward.
func EmbeddingDenseBackwardOut(out *Tensor, gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
lib.AtgEmbeddingDenseBackwardOut(ptr, out.ctensor, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingOut is the out= variant of Embedding.
func EmbeddingOut(out *Tensor, weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
csparse := int32(0)
if sparse { csparse = int32(1) }
lib.AtgEmbeddingOut(ptr, out.ctensor, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingRenorm wraps ATen `embedding_renorm` on ts (del drops ts).
func(ts *Tensor) EmbeddingRenorm(indices *Tensor, maxNorm float64, normType float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmbeddingRenorm(ptr, ts.ctensor, indices.ctensor, maxNorm, normType)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingRenorm_ applies ATen `embedding_renorm_` to ts in place.
func(ts *Tensor) EmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingRenormOut is the out= variant of EmbeddingRenorm (del drops ts).
func(ts *Tensor) EmbeddingRenormOut(out *Tensor, indices *Tensor, maxNorm float64, normType float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmbeddingRenormOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, maxNorm, normType)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmbeddingSparseBackward wraps ATen `embedding_sparse_backward`.
func EmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq { cscaleGradByFreq = int32(1) }
lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Empty wraps ATen `empty`, creating an uninitialized tensor of the given
// size with the requested dtype and device (passed as C ints).
func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgEmpty(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmptyLike wraps ATen `empty_like` on ts (del drops ts).
func(ts *Tensor) EmptyLike(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmptyLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmptyLikeOut is the out= variant of EmptyLike (del drops ts).
func(ts *Tensor) EmptyLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmptyLikeOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmptyOut is the out= variant of Empty.
func EmptyOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgEmptyOut(ptr, out.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmptyQuantized wraps ATen `empty_quantized`, sizing a new tensor from
// `size` with quantization parameters taken from qtensor.
func EmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgEmptyQuantized(ptr, size, sizeLen, qtensor.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmptyQuantizedOut is the out= variant of EmptyQuantized.
func EmptyQuantizedOut(out *Tensor, size []int64, qtensor *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgEmptyQuantizedOut(ptr, out.ctensor, size, sizeLen, qtensor.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmptyStrided wraps ATen `empty_strided` with explicit size and stride.
func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.AtgEmptyStrided(ptr, size, sizeLen, stride, strideLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EmptyStridedOut is the out= variant of EmptyStrided.
func EmptyStridedOut(out *Tensor, size []int64, stride []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.AtgEmptyStridedOut(ptr, out.ctensor, size, sizeLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Eq compares ts against a scalar element-wise (ATen `eq`), returning a new
// tensor (del drops ts).
func(ts *Tensor) Eq(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEq(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Eq_ compares ts against a scalar element-wise, in place.
func(ts *Tensor) Eq_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEq_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// EqScalarOut is the out= variant of Eq (del drops ts).
func(ts *Tensor) EqScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EqTensor compares ts against another tensor element-wise (del drops ts).
func(ts *Tensor) EqTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// EqTensor_ compares ts against another tensor element-wise, in place.
func(ts *Tensor) EqTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// EqTensorOut is the out= variant of EqTensor (del drops ts).
func(ts *Tensor) EqTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// Equal wraps ATen `equal`, returning a Go bool directly from the C call
// (no result-slot allocation needed). del drops ts on return.
func(ts *Tensor) Equal(other *Tensor, del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgEqual(ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Erf wraps ATen `erf` on ts; returns a new tensor (del drops ts).
func(ts *Tensor) Erf(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Erf_ applies ATen `erf_` to ts in place.
func(ts *Tensor) Erf_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErf_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ErfOut is the out= variant of Erf (del drops ts).
func(ts *Tensor) ErfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Erfc wraps ATen `erfc` (complementary error function); del drops ts.
func(ts *Tensor) Erfc(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Erfc_ applies ATen `erfc_` to ts in place.
func(ts *Tensor) Erfc_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfc_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ErfcOut is the out= variant of Erfc (del drops ts).
func(ts *Tensor) ErfcOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Erfinv wraps ATen `erfinv` (inverse error function); del drops ts.
func(ts *Tensor) Erfinv(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfinv(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Erfinv_ applies ATen `erfinv_` to ts in place.
func(ts *Tensor) Erfinv_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfinv_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ErfinvOut is the out= variant of Erfinv (del drops ts).
func(ts *Tensor) ErfinvOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Exp wraps ATen `exp` on ts; returns a new tensor (del drops ts).
func(ts *Tensor) Exp(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Exp2 wraps ATen `exp2` (base-2 exponential); del drops ts.
func(ts *Tensor) Exp2(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp2(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Exp2_ applies ATen `exp2_` to ts in place.
func(ts *Tensor) Exp2_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp2_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Exp2Out is the out= variant of Exp2 (del drops ts).
func(ts *Tensor) Exp2Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp2Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Exp_ applies ATen `exp_` to ts in place.
func(ts *Tensor) Exp_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ExpOut is the out= variant of Exp (del drops ts).
func(ts *Tensor) ExpOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Expand wraps ATen `expand`, broadcasting ts to `size`. implicit is
// marshalled to a C int flag; del drops ts on return.
func(ts *Tensor) Expand(size []int64, implicit bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
cimplicit := int32(0)
if implicit { cimplicit = int32(1) }
lib.AtgExpand(ptr, ts.ctensor, size, sizeLen, cimplicit)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ExpandAs wraps ATen `expand_as`, expanding ts to other's size (del drops ts).
func(ts *Tensor) ExpandAs(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ExpandCopy wraps ATen `expand_copy` (materialized expand); del drops ts.
func(ts *Tensor) ExpandCopy(size []int64, implicit bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
cimplicit := int32(0)
if implicit { cimplicit = int32(1) }
lib.AtgExpandCopy(ptr, ts.ctensor, size, sizeLen, cimplicit)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ExpandCopyOut is the out= variant of ExpandCopy (del drops ts).
func(ts *Tensor) ExpandCopyOut(out *Tensor, size []int64, implicit bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
cimplicit := int32(0)
if implicit { cimplicit = int32(1) }
lib.AtgExpandCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, cimplicit)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Expm1 calls lib.AtgExpm1 on ts and returns the result as a new *Tensor.
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) Expm1(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpm1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Expm1_ is the in-place variant: on success ts.ctensor is replaced by the
// handle produced by lib.AtgExpm1_.
func(ts *Tensor) Expm1_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpm1_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// Expm1Out is the out-variant: out.ctensor is passed to the C call as the
// destination tensor.
func(ts *Tensor) Expm1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpm1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Exponential calls lib.AtgExponential with the rate parameter lambd and
// returns the result as a new *Tensor.
func(ts *Tensor) Exponential(lambd float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExponential(ptr, ts.ctensor, lambd)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Exponential_ is the in-place variant: on success ts.ctensor is replaced by
// the handle produced by lib.AtgExponential_.
func(ts *Tensor) Exponential_(lambd float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExponential_(ptr, ts.ctensor, lambd)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// ExponentialOut is the out-variant: out.ctensor is passed to the C call as
// the destination tensor.
func(ts *Tensor) ExponentialOut(out *Tensor, lambd float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExponentialOut(ptr, out.ctensor, ts.ctensor, lambd)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Eye calls lib.AtgEye to build an n-by-n tensor with the requested dtype
// and device (both passed as their C int codes).
func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// EyeM is the rectangular variant of Eye, taking explicit row (n) and
// column (m) counts.
func EyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEyeM(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// EyeMOut is the out-variant of EyeM: out.ctensor is passed to the C call
// as the destination tensor, so no dtype/device options are needed.
func EyeMOut(out *Tensor, n int64, m int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEyeMOut(ptr, out.ctensor, n, m)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// EyeOut is the out-variant of Eye: out.ctensor is passed to the C call as
// the destination tensor.
func EyeOut(out *Tensor, n int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEyeOut(ptr, out.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FakeQuantizePerChannelAffine calls lib.AtgFakeQuantizePerChannelAffine
// with per-channel scale/zeroPoint tensors, the channel axis, and the
// quantization range [quantMin, quantMax]. If del is true, ts is dropped
// when this call returns.
func(ts *Tensor) FakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// FakeQuantizePerChannelAffineCachemask calls
// lib.AtgFakeQuantizePerChannelAffineCachemask and returns both resulting
// tensors (value and cache mask). If del is true, ts is dropped when this
// call returns.
//
// Fix: the generated code allocated the result buffer with C.malloc(0), a
// zero-byte block, while the Go code itself reads two pointer-sized slots
// from it (*ctensorPtr0 and *ctensorPtr1) — an out-of-bounds access that
// only "worked" via allocator slack. The buffer is now sized for two
// tensor handles.
func(ts *Tensor) FakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
// The second result handle lives immediately after the first.
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFakeQuantizePerChannelAffineCachemask(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// FakeQuantizePerChannelAffineCachemaskBackward calls the backward binding
// with the incoming gradient and the cache mask tensor, returning a new
// *Tensor wrapping the result.
func FakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerChannelAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// FakeQuantizePerChannelAffineCachemaskOut is the out-variant of the
// cachemask op: out0/out1 are passed to the C call as destination tensors,
// and both results are returned as new *Tensor wrappers. If del is true,
// ts is dropped when this call returns.
//
// Fix: the generated code allocated the result buffer with C.malloc(0), a
// zero-byte block, while the Go code reads two pointer-sized slots from it
// (*ctensorPtr0 and *ctensorPtr1) — an out-of-bounds access that only
// "worked" via allocator slack. The buffer is now sized for two handles.
func(ts *Tensor) FakeQuantizePerChannelAffineCachemaskOut(out0 *Tensor, out1 *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
// The second result handle lives immediately after the first.
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFakeQuantizePerChannelAffineCachemaskOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// FakeQuantizePerTensorAffine calls lib.AtgFakeQuantizePerTensorAffine with
// scalar scale/zeroPoint and the quantization range [quantMin, quantMax].
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// FakeQuantizePerTensorAffineCachemask calls
// lib.AtgFakeQuantizePerTensorAffineCachemask and returns both resulting
// tensors (value and cache mask). If del is true, ts is dropped when this
// call returns.
//
// Fix: the generated code allocated the result buffer with C.malloc(0), a
// zero-byte block, while the Go code reads two pointer-sized slots from it
// (*ctensorPtr0 and *ctensorPtr1) — an out-of-bounds access that only
// "worked" via allocator slack. The buffer is now sized for two handles.
func(ts *Tensor) FakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
// The second result handle lives immediately after the first.
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFakeQuantizePerTensorAffineCachemask(ctensorPtr0, ts.ctensor, scale, zeroPoint, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// FakeQuantizePerTensorAffineCachemaskBackward calls the backward binding
// with the incoming gradient and the cache mask tensor, returning a new
// *Tensor wrapping the result.
func FakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerTensorAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// FakeQuantizePerTensorAffineCachemaskOut is the out-variant of the
// per-tensor cachemask op: out0/out1 are passed to the C call as destination
// tensors, and both results are returned as new *Tensor wrappers. If del is
// true, ts is dropped when this call returns.
//
// Fix: the generated code allocated the result buffer with C.malloc(0), a
// zero-byte block, while the Go code reads two pointer-sized slots from it
// (*ctensorPtr0 and *ctensorPtr1) — an out-of-bounds access that only
// "worked" via allocator slack. The buffer is now sized for two handles.
func(ts *Tensor) FakeQuantizePerTensorAffineCachemaskOut(out0 *Tensor, out1 *Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2) * C.size_t(unsafe.Sizeof(uintptr(0))))))
// The second result handle lives immediately after the first.
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFakeQuantizePerTensorAffineCachemaskOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, scale, zeroPoint, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// FakeQuantizePerTensorAffineTensorQparams is the variant taking scale and
// zeroPoint as tensors rather than scalars. If del is true, ts is dropped
// when this call returns.
func(ts *Tensor) FakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerTensorAffineTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FbgemmLinearFp16Weight calls lib.AtgFbgemmLinearFp16Weight with the input,
// the pre-packed weight tensor, and the bias, returning a new *Tensor.
func FbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FbgemmLinearFp16WeightFp32Activation mirrors FbgemmLinearFp16Weight but
// calls the fp32-activation binding.
func FbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FbgemmLinearInt8Weight calls the int8 linear binding with weight, packed
// weight, column offsets, scalar quantization parameters, and bias.
func FbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FbgemmLinearInt8WeightFp32Activation mirrors FbgemmLinearInt8Weight but
// calls the fp32-activation binding.
func FbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FbgemmPackGemmMatrixFp16 calls lib.AtgFbgemmPackGemmMatrixFp16 on input
// and returns the packed matrix as a new *Tensor.
func FbgemmPackGemmMatrixFp16(input *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FbgemmPackQuantizedMatrix calls lib.AtgFbgemmPackQuantizedMatrix on input
// and returns the packed matrix as a new *Tensor.
func FbgemmPackQuantizedMatrix(input *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FbgemmPackQuantizedMatrixKn is the variant taking explicit k and n
// dimensions for the packing.
func FbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmPackQuantizedMatrixKn(ptr, input.ctensor, k, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FeatureAlphaDropout calls lib.AtgFeatureAlphaDropout with dropout
// probability p and the train flag converted to a C int (0/1).
func FeatureAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// bool -> int32 for the C ABI (0 = false, 1 = true).
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FeatureAlphaDropout_ is the in-place variant: on success ts.ctensor is
// replaced by the handle produced by lib.AtgFeatureAlphaDropout_.
func(ts *Tensor) FeatureAlphaDropout_(p float64, train bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// FeatureDropout calls lib.AtgFeatureDropout with dropout probability p and
// the train flag converted to a C int (0/1).
func FeatureDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FeatureDropout_ is the in-place variant: on success ts.ctensor is replaced
// by the handle produced by lib.AtgFeatureDropout_.
func(ts *Tensor) FeatureDropout_(p float64, train bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFft calls lib.AtgFftFft. n is an optional length encoded as a slice:
// an empty slice is passed as null (cnNull=1); otherwise only n[0] is used.
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) FftFft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftFft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFft2 calls lib.AtgFftFft2 with the optional sizes s and dims dim, each
// passed with its length.
func(ts *Tensor) FftFft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftFft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFft2Out is the out-variant of FftFft2: out.ctensor is passed to the C
// call as the destination tensor.
func(ts *Tensor) FftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftFft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFftOut is the out-variant of FftFft; n uses the same optional-slice
// encoding (empty slice -> null).
func(ts *Tensor) FftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftFftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFftfreq calls lib.AtgFftFftfreq with length n, spacing d, and the
// requested dtype/device (passed as their C int codes).
func FftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFftfreqOut is the out-variant of FftFftfreq: out.ctensor is passed to
// the C call as the destination tensor.
func FftFftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFftfreqOut(ptr, out.ctensor, n, d)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFftn calls lib.AtgFftFftn with the optional sizes s and dims dim, each
// passed with its length. If del is true, ts is dropped on return.
func(ts *Tensor) FftFftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftFftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFftnOut is the out-variant of FftFftn: out.ctensor is passed to the C
// call as the destination tensor.
func(ts *Tensor) FftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftFftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftFftshift calls lib.AtgFftFftshift with the dims to shift over, passed
// with their length.
func(ts *Tensor) FftFftshift(dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.AtgFftFftshift(ptr, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftHfft calls lib.AtgFftHfft. n is an optional length encoded as a slice:
// an empty slice is passed as null (cnNull=1); otherwise only n[0] is used.
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) FftHfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftHfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftHfft2 calls lib.AtgFftHfft2 with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftHfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftHfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftHfft2Out is the out-variant of FftHfft2: out.ctensor is passed to the
// C call as the destination tensor.
func(ts *Tensor) FftHfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftHfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftHfftOut is the out-variant of FftHfft; n uses the same optional-slice
// encoding (empty slice -> null).
func(ts *Tensor) FftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftHfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftHfftn calls lib.AtgFftHfftn with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftHfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftHfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftHfftnOut is the out-variant of FftHfftn: out.ctensor is passed to the
// C call as the destination tensor.
func(ts *Tensor) FftHfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftHfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIfft calls lib.AtgFftIfft. n is an optional length encoded as a slice:
// an empty slice is passed as null (cnNull=1); otherwise only n[0] is used.
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) FftIfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIfft2 calls lib.AtgFftIfft2 with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftIfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIfft2Out is the out-variant of FftIfft2: out.ctensor is passed to the
// C call as the destination tensor.
func(ts *Tensor) FftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIfftOut is the out-variant of FftIfft; n uses the same optional-slice
// encoding (empty slice -> null).
func(ts *Tensor) FftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIfftn calls lib.AtgFftIfftn with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftIfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIfftnOut is the out-variant of FftIfftn: out.ctensor is passed to the
// C call as the destination tensor.
func(ts *Tensor) FftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIfftshift calls lib.AtgFftIfftshift with the dims to shift over,
// passed with their length.
func(ts *Tensor) FftIfftshift(dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.AtgFftIfftshift(ptr, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIhfft calls lib.AtgFftIhfft. n is an optional length encoded as a
// slice: an empty slice is passed as null (cnNull=1); otherwise only n[0]
// is used. If del is true, ts is dropped when this call returns.
func(ts *Tensor) FftIhfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIhfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIhfft2 calls lib.AtgFftIhfft2 with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftIhfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIhfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIhfft2Out is the out-variant of FftIhfft2: out.ctensor is passed to
// the C call as the destination tensor.
func(ts *Tensor) FftIhfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIhfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIhfftOut is the out-variant of FftIhfft; n uses the same
// optional-slice encoding (empty slice -> null).
func(ts *Tensor) FftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIhfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIhfftn calls lib.AtgFftIhfftn with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftIhfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIhfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIhfftnOut is the out-variant of FftIhfftn: out.ctensor is passed to
// the C call as the destination tensor.
func(ts *Tensor) FftIhfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIhfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIrfft calls lib.AtgFftIrfft. n is an optional length encoded as a
// slice: an empty slice is passed as null (cnNull=1); otherwise only n[0]
// is used. If del is true, ts is dropped when this call returns.
func(ts *Tensor) FftIrfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIrfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIrfft2 calls lib.AtgFftIrfft2 with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftIrfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIrfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIrfft2Out is the out-variant of FftIrfft2: out.ctensor is passed to
// the C call as the destination tensor.
func(ts *Tensor) FftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIrfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIrfftOut is the out-variant of FftIrfft; n uses the same
// optional-slice encoding (empty slice -> null).
func(ts *Tensor) FftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIrfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIrfftn calls lib.AtgFftIrfftn with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftIrfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIrfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftIrfftnOut is the out-variant of FftIrfftn: out.ctensor is passed to
// the C call as the destination tensor.
func(ts *Tensor) FftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftIrfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftRfft calls lib.AtgFftRfft. n is an optional length encoded as a slice:
// an empty slice is passed as null (cnNull=1); otherwise only n[0] is used.
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) FftRfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftRfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftRfft2 calls lib.AtgFftRfft2 with the optional sizes s and dims dim,
// each passed with its length.
func(ts *Tensor) FftRfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftRfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftRfft2Out is the out-variant of FftRfft2: out.ctensor is passed to the
// C call as the destination tensor.
func(ts *Tensor) FftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftRfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftRfftOut is the out-variant of FftRfft; n uses the same optional-slice
// encoding (empty slice -> null).
func(ts *Tensor) FftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftRfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftRfftfreq calls lib.AtgFftRfftfreq with length n, spacing d, and the
// requested dtype/device (passed as their C int codes).
func FftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// FftRfftfreqOut is the out-variant of FftRfftfreq: out.ctensor is passed
// to the C call as the destination tensor.
func FftRfftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfftfreqOut(ptr, out.ctensor, n, d)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) FftRfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftRfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) FftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sLen := len(s)
dimLen := len(dim)
lib.AtgFftRfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fill wraps lib.AtgFill: returns a new tensor filled with value.
// If del is true, ts is dropped (its C tensor freed) when the call returns.
func(ts *Tensor) Fill(value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFill(ptr, ts.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fill_ is the in-place variant of Fill: ts's C tensor handle is replaced with the result.
func(ts *Tensor) Fill_(value *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFill_(ptr, ts.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FillDiagonal_ fills the diagonal of ts in place with fillValue;
// wrap is passed to C as an int32 flag (1=true, 0=false).
func(ts *Tensor) FillDiagonal_(fillValue *Scalar, wrap bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cwrap := int32(0)
if wrap { cwrap = int32(1) }
lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FillScalarOut is the out-variant of Fill: the result is written into out.
func(ts *Tensor) FillScalarOut(out *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFillScalarOut(ptr, out.ctensor, ts.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FillTensor is like Fill but takes the fill value as a (presumably 0-dim) tensor.
func(ts *Tensor) FillTensor(value *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFillTensor(ptr, ts.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FillTensor_ is the in-place variant of FillTensor.
func(ts *Tensor) FillTensor_(value *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFillTensor_(ptr, ts.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FillTensorOut is the out-variant of FillTensor: the result is written into out.
func(ts *Tensor) FillTensorOut(out *Tensor, value *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFillTensorOut(ptr, out.ctensor, ts.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fix wraps lib.AtgFix (presumably torch.fix, alias of trunc — confirm against libtorch).
// If del is true, ts is dropped (its C tensor freed) when the call returns.
func(ts *Tensor) Fix(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFix(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fix_ is the in-place variant of Fix: ts's C tensor handle is replaced with the result.
func(ts *Tensor) Fix_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFix_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FixOut is the out-variant of Fix: the result is written into out.
func(ts *Tensor) FixOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFixOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Flatten wraps lib.AtgFlatten: flattens dimensions startDim..endDim into one.
func(ts *Tensor) Flatten(startDim int64, endDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FlattenDenseTensors is a free function wrapping lib.AtgFlattenDenseTensors;
// the Go tensors are unwrapped to their C handles before the call.
func FlattenDenseTensors(tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgFlattenDenseTensors(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Flip wraps lib.AtgFlip: reverses ts along the given dims.
func(ts *Tensor) Flip(dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimsLen := len(dims)
lib.AtgFlip(ptr, ts.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FlipOut is the out-variant of Flip: the result is written into out.
func(ts *Tensor) FlipOut(out *Tensor, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimsLen := len(dims)
lib.AtgFlipOut(ptr, out.ctensor, ts.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fliplr wraps lib.AtgFliplr (presumably torch.fliplr — confirm against libtorch).
func(ts *Tensor) Fliplr(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFliplr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Flipud wraps lib.AtgFlipud (presumably torch.flipud — confirm against libtorch).
func(ts *Tensor) Flipud(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFlipud(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPower wraps lib.AtgFloatPower: ts raised element-wise to a tensor exponent.
// If del is true, ts is dropped (its C tensor freed) when the call returns.
func(ts *Tensor) FloatPower(exponent *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPower(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPower_ is the in-place variant taking a scalar exponent:
// ts's C tensor handle is replaced with the result.
func(ts *Tensor) FloatPower_(exponent *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPower_(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPowerScalar is a free function: scalar base raised to a tensor exponent.
func FloatPowerScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerScalar(ptr, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPowerScalarOut is the out-variant of FloatPowerScalar: result written into out.
func FloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPowerTensor_ is the in-place variant taking a tensor exponent.
func(ts *Tensor) FloatPowerTensor_(exponent *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensor_(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPowerTensorScalar: tensor base raised to a scalar exponent.
func(ts *Tensor) FloatPowerTensorScalar(exponent *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensorScalar(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPowerTensorScalarOut is the out-variant of FloatPowerTensorScalar.
func(ts *Tensor) FloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloatPowerTensorTensorOut is the out-variant of FloatPower (tensor exponent).
func(ts *Tensor) FloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Floor wraps lib.AtgFloor: element-wise floor of ts.
// If del is true, ts is dropped (its C tensor freed) when the call returns.
func(ts *Tensor) Floor(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloor(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Floor_ is the in-place variant of Floor: ts's C tensor handle is replaced with the result.
func(ts *Tensor) Floor_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloor_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FloorDivide wraps lib.AtgFloorDivide: element-wise floor division by a tensor.
func(ts *Tensor) FloorDivide(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloorDivide_ is the in-place variant of FloorDivide.
func(ts *Tensor) FloorDivide_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FloorDivideOut is the out-variant of FloorDivide: the result is written into out.
func(ts *Tensor) FloorDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloorDivideScalar is FloorDivide with a scalar divisor.
func(ts *Tensor) FloorDivideScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivideScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FloorDivideScalar_ is the in-place variant of FloorDivideScalar.
func(ts *Tensor) FloorDivideScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivideScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FloorOut is the out-variant of Floor: the result is written into out.
func(ts *Tensor) FloorOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fmax wraps lib.AtgFmax (presumably torch.fmax — confirm against libtorch).
// If del is true, ts is dropped (its C tensor freed) when the call returns.
func(ts *Tensor) Fmax(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmax(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FmaxOut is the out-variant of Fmax: the result is written into out.
func(ts *Tensor) FmaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fmin wraps lib.AtgFmin (presumably torch.fmin — confirm against libtorch).
func(ts *Tensor) Fmin(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmin(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FminOut is the out-variant of Fmin: the result is written into out.
func(ts *Tensor) FminOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFminOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fmod wraps lib.AtgFmod with a scalar divisor.
func(ts *Tensor) Fmod(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmod(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Fmod_ is the in-place variant of Fmod: ts's C tensor handle is replaced with the result.
func(ts *Tensor) Fmod_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmod_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FmodScalarOut is the out-variant of Fmod (scalar divisor).
func(ts *Tensor) FmodScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FmodTensor is Fmod with a tensor divisor.
func(ts *Tensor) FmodTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FmodTensor_ is the in-place variant of FmodTensor.
func(ts *Tensor) FmodTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FmodTensorOut is the out-variant of FmodTensor.
func(ts *Tensor) FmodTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Frac wraps lib.AtgFrac (presumably the fractional part of each element).
func(ts *Tensor) Frac(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFrac(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Frac_ is the in-place variant of Frac.
func(ts *Tensor) Frac_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFrac_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// FracOut is the out-variant of Frac: the result is written into out.
func(ts *Tensor) FracOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFracOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// FractionalMaxPool2d wraps lib.AtgFractionalMaxPool2d and returns two tensors
// (presumably output and pooling indices — confirm against libtorch).
// NOTE(review): ctensorPtr1 is ctensorPtr0 advanced by one pointer width; the C side
// is expected to write both result handles into consecutive slots starting at
// ctensorPtr0, which was obtained via C.malloc(0) — verify against libtch's contract.
func(ts *Tensor) FractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool2d(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// FractionalMaxPool2dBackward computes the gradient w.r.t. the input, given
// gradOutput and the indices produced by the forward pass.
func(ts *Tensor) FractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FractionalMaxPool2dBackwardGradInput is the out-variant of the backward pass:
// the gradient is written into gradInput.
func(ts *Tensor) FractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// FractionalMaxPool2dOutput is the out-variant of FractionalMaxPool2d:
// results are written into output and indices; two tensors are returned.
func(ts *Tensor) FractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool2dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// FractionalMaxPool3d is the 3-D counterpart of FractionalMaxPool2d;
// same two-tensor return convention (consecutive slots from ctensorPtr0).
func(ts *Tensor) FractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool3d(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// FractionalMaxPool3dBackward computes the gradient w.r.t. the input (3-D variant).
func(ts *Tensor) FractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FractionalMaxPool3dBackwardGradInput is the out-variant of the 3-D backward pass.
func(ts *Tensor) FractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// FractionalMaxPool3dOutput is the out-variant of FractionalMaxPool3d:
// results are written into output and indices; two tensors are returned.
func(ts *Tensor) FractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
outputSizeLen := len(outputSize)
lib.AtgFractionalMaxPool3dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Frexp wraps lib.AtgFrexp and returns two tensors (presumably mantissa and
// exponent, as in torch.frexp — confirm against libtorch).
// NOTE(review): the second handle is read from the slot one pointer width past
// ctensorPtr0, which was obtained via C.malloc(0); the C side is expected to
// populate both consecutive slots — verify against libtch's contract.
func(ts *Tensor) Frexp(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFrexp(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// FrexpTensorOut is the out-variant of Frexp: results are written into
// mantissa and exponent; two tensors are returned.
func(ts *Tensor) FrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFrexpTensorOut(ctensorPtr0, mantissa.ctensor, exponent.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// FrobeniusNorm wraps lib.AtgFrobeniusNorm over the given dims;
// keepdim is passed to C as an int32 flag (1=true, 0=false).
// If del is true, ts is dropped (its C tensor freed) when the call returns.
func(ts *Tensor) FrobeniusNorm(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgFrobeniusNorm(ptr, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FrobeniusNormOut is the out-variant of FrobeniusNorm: result written into out.
func(ts *Tensor) FrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FromFile is a factory wrapping lib.AtgFromFile (presumably torch.from_file).
// size is an optional int64 encoded as a slice: empty means "unset"; only size[0] is used.
func FromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cshared := int32(0)
if shared { cshared = int32(1) }
var csizeVal int64 = 0
var csizeNull int = 1
if len(size) > 0 {
csizeVal = size[0]
csizeNull = 0
}
lib.AtgFromFile(ptr, filename, cshared, csizeVal, csizeNull, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FromFileOut is the out-variant of FromFile: the result is written into out.
func FromFileOut(out *Tensor, filename string, shared bool, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cshared := int32(0)
if shared { cshared = int32(1) }
var csizeVal int64 = 0
var csizeNull int = 1
if len(size) > 0 {
csizeVal = size[0]
csizeNull = 0
}
lib.AtgFromFileOut(ptr, out.ctensor, filename, cshared, csizeVal, csizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Full is a factory wrapping lib.AtgFull: a tensor of the given size filled
// with fillValue, created with the given dtype and device.
func Full(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgFull(ptr, size, sizeLen, fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FullLike wraps lib.AtgFullLike: a tensor shaped like ts, filled with fillValue.
// If del is true, ts is dropped (its C tensor freed) when the call returns.
func(ts *Tensor) FullLike(fillValue *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FullLikeOut is the out-variant of FullLike: the result is written into out.
func(ts *Tensor) FullLikeOut(out *Tensor, fillValue *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFullLikeOut(ptr, out.ctensor, ts.ctensor, fillValue.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FullOut is the out-variant of Full: the result is written into out.
func FullOut(out *Tensor, size []int64, fillValue *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgFullOut(ptr, out.ctensor, size, sizeLen, fillValue.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// FusedMovingAvgObsFakeQuant wraps lib.AtgFusedMovingAvgObsFakeQuant
// (presumably torch.fused_moving_avg_obs_fake_quant — confirm against libtorch);
// the two trailing bools are passed to C as int32 flags.
func(ts *Tensor) FusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperRowFakeQuant := int32(0)
if perRowFakeQuant { cperRowFakeQuant = int32(1) }
csymmetricQuant := int32(0)
if symmetricQuant { csymmetricQuant = int32(1) }
lib.AtgFusedMovingAvgObsFakeQuant(ptr, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Gather(dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csparseGrad := int32(0)
if sparseGrad { csparseGrad = int32(1) }
lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) GatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csparseGrad := int32(0)
if sparseGrad { csparseGrad = int32(1) }
lib.AtgGatherBackward(ptr, grad.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GatherOut wraps the C binding lib.AtgGatherOut, writing into the caller's
// out tensor, and returns the result as a new Tensor. If del is true, ts is
// dropped (freed) when the call returns.
func(ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csparseGrad := int32(0)
if sparseGrad { csparseGrad = int32(1) }
lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Gcd wraps the C binding lib.AtgGcd on ts and other and returns the result
// as a new Tensor. If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) Gcd(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGcd(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Gcd_ is the in-place variant: on success the handle written by lib.AtgGcd_
// replaces ts.ctensor.
func(ts *Tensor) Gcd_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGcd_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GcdOut wraps the C binding lib.AtgGcdOut, writing into the caller's out
// tensor. If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GcdOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGcdOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ge wraps the C binding lib.AtgGe on ts and a Scalar and returns the result
// as a new Tensor. If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) Ge(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGe(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ge_ is the in-place variant: on success the handle written by lib.AtgGe_
// replaces ts.ctensor.
func(ts *Tensor) Ge_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGe_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GeScalarOut wraps the C binding lib.AtgGeScalarOut, writing into the
// caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GeTensor wraps the C binding lib.AtgGeTensor (tensor-tensor overload of Ge).
// If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GeTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GeTensor_ is the in-place variant: on success the handle written by
// lib.AtgGeTensor_ replaces ts.ctensor.
func(ts *Tensor) GeTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GeTensorOut wraps the C binding lib.AtgGeTensorOut, writing into the
// caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Gelu wraps the C binding lib.AtgGelu; approximate is forwarded as a Go
// string. If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) Gelu(approximate string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGelu(ptr, ts.ctensor, approximate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Gelu_ is the in-place variant: on success the handle written by lib.AtgGelu_
// replaces ts.ctensor.
func(ts *Tensor) Gelu_(approximate string)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGelu_(ptr, ts.ctensor, approximate)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GeluBackward wraps the C binding lib.AtgGeluBackward (gradOutput first, ts
// second). If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GeluBackward(gradOutput *Tensor, approximate string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeluBackward(ptr, gradOutput.ctensor, ts.ctensor, approximate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GeluBackwardGradInput wraps the C binding lib.AtgGeluBackwardGradInput,
// writing into the caller's gradInput tensor. If del is true, ts is dropped
// (freed) when the call returns.
func(ts *Tensor) GeluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, approximate string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, approximate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GeluOut wraps the C binding lib.AtgGeluOut, writing into the caller's out
// tensor. If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GeluOut(out *Tensor, approximate string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeluOut(ptr, out.ctensor, ts.ctensor, approximate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Geometric wraps the C binding lib.AtgGeometric with probability parameter p.
// If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) Geometric(p float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeometric(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Geometric_ is the in-place variant: on success the handle written by
// lib.AtgGeometric_ replaces ts.ctensor.
func(ts *Tensor) Geometric_(p float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeometric_(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GeometricOut wraps the C binding lib.AtgGeometricOut, writing into the
// caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GeometricOut(out *Tensor, p float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeometricOut(ptr, out.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Geqrf wraps the C binding lib.AtgGeqrf, which writes two result handles
// contiguously; the second is read one pointer-width past the first.
// NOTE(review): ctensorPtr0 comes from C.malloc(0) yet two Ctensor values are
// read through it — the allocation looks undersized; confirm the C side's
// write contract in the binding generator.
// If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) Geqrf(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgGeqrf(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// GeqrfA wraps the C binding lib.AtgGeqrfA (out-variant writing into a and
// tau); two result handles are read back contiguously from ctensorPtr0.
// NOTE(review): same C.malloc(0) / two-value read pattern as Geqrf — verify
// the allocation size in the generator.
// If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GeqrfA(a *Tensor, tau *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgGeqrfA(ctensorPtr0, a.ctensor, tau.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ger wraps the C binding lib.AtgGer on ts and vec2. If del is true, ts is
// dropped (freed) when the call returns.
func(ts *Tensor) Ger(vec2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGer(ptr, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GerOut wraps the C binding lib.AtgGerOut, writing into the caller's out
// tensor. If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GerOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Glu wraps the C binding lib.AtgGlu along dimension dim. If del is true, ts
// is dropped (freed) when the call returns.
func(ts *Tensor) Glu(dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGlu(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GluBackward wraps the C binding lib.AtgGluBackward (gradOutput first, ts
// second). If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GluBackward(gradOutput *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GluBackwardGradInput wraps the C binding lib.AtgGluBackwardGradInput,
// writing into the caller's gradInput tensor. If del is true, ts is dropped
// (freed) when the call returns.
func(ts *Tensor) GluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GluBackwardJvp is a package-level wrapper for the C binding
// lib.AtgGluBackwardJvp; all tensor arguments are forwarded by C handle.
func GluBackwardJvp(gradX *Tensor, gradGlu *Tensor, x *Tensor, dgradGlu *Tensor, dx *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluBackwardJvp(ptr, gradX.ctensor, gradGlu.ctensor, x.ctensor, dgradGlu.ctensor, dx.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GluBackwardJvpOut is the out-variant of GluBackwardJvp, writing into the
// caller's out tensor via lib.AtgGluBackwardJvpOut.
func GluBackwardJvpOut(out *Tensor, gradX *Tensor, gradGlu *Tensor, x *Tensor, dgradGlu *Tensor, dx *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluBackwardJvpOut(ptr, out.ctensor, gradX.ctensor, gradGlu.ctensor, x.ctensor, dgradGlu.ctensor, dx.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GluJvp is a package-level wrapper for the C binding lib.AtgGluJvp.
func GluJvp(glu *Tensor, x *Tensor, dx *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluJvp(ptr, glu.ctensor, x.ctensor, dx.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GluJvpOut is the out-variant of GluJvp, writing into the caller's out
// tensor via lib.AtgGluJvpOut.
func GluJvpOut(out *Tensor, glu *Tensor, x *Tensor, dx *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluJvpOut(ptr, out.ctensor, glu.ctensor, x.ctensor, dx.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GluOut wraps the C binding lib.AtgGluOut, writing into the caller's out
// tensor. If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GluOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Grad wraps the C binding lib.AtgGrad on ts. If del is true, ts is dropped
// (freed) when the call returns.
func(ts *Tensor) Grad(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGrad(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Greater wraps the C binding lib.AtgGreater on ts and a Scalar. If del is
// true, ts is dropped (freed) when the call returns.
func(ts *Tensor) Greater(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreater(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Greater_ is the in-place variant: on success the handle written by
// lib.AtgGreater_ replaces ts.ctensor.
func(ts *Tensor) Greater_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreater_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterEqual wraps the C binding lib.AtgGreaterEqual on ts and a Scalar.
// If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GreaterEqual(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqual(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterEqual_ is the in-place variant: on success the handle written by
// lib.AtgGreaterEqual_ replaces ts.ctensor.
func(ts *Tensor) GreaterEqual_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqual_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterEqualScalarOut wraps the C binding lib.AtgGreaterEqualScalarOut,
// writing into the caller's out tensor. If del is true, ts is dropped (freed)
// on return.
func(ts *Tensor) GreaterEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterEqualTensor wraps the C binding lib.AtgGreaterEqualTensor
// (tensor-tensor overload). If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GreaterEqualTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterEqualTensor_ is the in-place variant: on success the handle written
// by lib.AtgGreaterEqualTensor_ replaces ts.ctensor.
func(ts *Tensor) GreaterEqualTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterEqualTensorOut wraps the C binding lib.AtgGreaterEqualTensorOut,
// writing into the caller's out tensor. If del is true, ts is dropped (freed)
// on return.
func(ts *Tensor) GreaterEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterScalarOut wraps the C binding lib.AtgGreaterScalarOut, writing into
// the caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GreaterScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterTensor wraps the C binding lib.AtgGreaterTensor (tensor-tensor
// overload). If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GreaterTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterTensor_ is the in-place variant: on success the handle written by
// lib.AtgGreaterTensor_ replaces ts.ctensor.
func(ts *Tensor) GreaterTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GreaterTensorOut wraps the C binding lib.AtgGreaterTensorOut, writing into
// the caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GreaterTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GridSampler is a package-level wrapper for the C binding lib.AtgGridSampler;
// alignCorners is converted to a C-style int32 flag (0/1).
func GridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GridSampler2d is a package-level wrapper for the C binding
// lib.AtgGridSampler2d; alignCorners is converted to a C-style int32 flag.
func GridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GridSampler2dOut is the out-variant of GridSampler2d, writing into the
// caller's out tensor via lib.AtgGridSampler2dOut.
func GridSampler2dOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgGridSampler2dOut(ptr, out.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GridSampler3d is a package-level wrapper for the C binding
// lib.AtgGridSampler3d; alignCorners is converted to a C-style int32 flag.
func GridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GridSampler3dOut is the out-variant of GridSampler3d, writing into the
// caller's out tensor via lib.AtgGridSampler3dOut.
func GridSampler3dOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
lib.AtgGridSampler3dOut(ptr, out.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GroupNorm is a package-level wrapper for the C binding lib.AtgGroupNorm;
// cudnnEnabled is converted to a C-style int32 flag (0/1).
func GroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccudnnEnabled := int32(0)
if cudnnEnabled { ccudnnEnabled = int32(1) }
lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Gru is a package-level wrapper for the C binding lib.AtgGru. The params
// slice is flattened to C tensor handles, bool flags become C int32 (0/1),
// and two result handles are read back contiguously from ctensorPtr0.
// NOTE(review): ctensorPtr0 comes from C.malloc(0) yet two Ctensor values are
// read through it — the allocation looks undersized; confirm in the generator.
func Gru(input *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {cparams = append(cparams, t.ctensor)}
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.AtgGru(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// GruCell is a package-level wrapper for the C binding lib.AtgGruCell; all
// tensor arguments are forwarded by C handle.
func GruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// GruData is the packed-sequence variant of Gru, wrapping lib.AtgGruData;
// two result handles are read back contiguously from ctensorPtr0.
// NOTE(review): same C.malloc(0) / two-value read pattern as Gru — verify the
// allocation size in the generator.
func GruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {cparams = append(cparams, t.ctensor)}
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
lib.AtgGruData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// Gt wraps the C binding lib.AtgGt on ts and a Scalar. If del is true, ts is
// dropped (freed) when the call returns.
func(ts *Tensor) Gt(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGt(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Gt_ is the in-place variant: on success the handle written by lib.AtgGt_
// replaces ts.ctensor.
func(ts *Tensor) Gt_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGt_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GtScalarOut wraps the C binding lib.AtgGtScalarOut, writing into the
// caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GtTensor wraps the C binding lib.AtgGtTensor (tensor-tensor overload).
// If del is true, ts is dropped (freed) when the call returns.
func(ts *Tensor) GtTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// GtTensor_ is the in-place variant: on success the handle written by
// lib.AtgGtTensor_ replaces ts.ctensor.
func(ts *Tensor) GtTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// GtTensorOut wraps the C binding lib.AtgGtTensorOut, writing into the
// caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) GtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindow is a package-level factory wrapping lib.AtgHammingWindow;
// the dtype and device options are passed as their C int encodings.
func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindowOut is the out-variant of HammingWindow, writing into the
// caller's out tensor via lib.AtgHammingWindowOut.
func HammingWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHammingWindowOut(ptr, out.ctensor, windowLength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindowPeriodic wraps lib.AtgHammingWindowPeriodic; periodic is
// converted to a C-style int32 flag (0/1).
func HammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHammingWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindowPeriodicAlpha wraps lib.AtgHammingWindowPeriodicAlpha,
// forwarding the alpha coefficient; periodic becomes a C int32 flag.
func HammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHammingWindowPeriodicAlpha(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindowPeriodicAlphaBeta wraps lib.AtgHammingWindowPeriodicAlphaBeta,
// forwarding alpha and beta coefficients; periodic becomes a C int32 flag.
func HammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHammingWindowPeriodicAlphaBeta(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindowPeriodicAlphaBetaOut is the out-variant of
// HammingWindowPeriodicAlphaBeta, writing into the caller's out tensor.
func HammingWindowPeriodicAlphaBetaOut(out *Tensor, windowLength int64, periodic bool, alpha float64, beta float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHammingWindowPeriodicAlphaBetaOut(ptr, out.ctensor, windowLength, cperiodic, alpha, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindowPeriodicAlphaOut is the out-variant of
// HammingWindowPeriodicAlpha, writing into the caller's out tensor.
func HammingWindowPeriodicAlphaOut(out *Tensor, windowLength int64, periodic bool, alpha float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHammingWindowPeriodicAlphaOut(ptr, out.ctensor, windowLength, cperiodic, alpha)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HammingWindowPeriodicOut is the out-variant of HammingWindowPeriodic,
// writing into the caller's out tensor.
func HammingWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHammingWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HannWindow is a package-level factory wrapping lib.AtgHannWindow; the
// dtype and device options are passed as their C int encodings.
func HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HannWindowOut is the out-variant of HannWindow, writing into the caller's
// out tensor via lib.AtgHannWindowOut.
func HannWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHannWindowOut(ptr, out.ctensor, windowLength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HannWindowPeriodic wraps lib.AtgHannWindowPeriodic; periodic is converted
// to a C-style int32 flag (0/1).
func HannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHannWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HannWindowPeriodicOut is the out-variant of HannWindowPeriodic, writing
// into the caller's out tensor.
func HannWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgHannWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hardshrink wraps the C binding lib.AtgHardshrink on ts. If del is true, ts
// is dropped (freed) when the call returns.
func(ts *Tensor) Hardshrink(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrink(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HardshrinkBackward wraps the C binding lib.AtgHardshrinkBackward (gradOut
// first, ts second, lambd as Scalar). If del is true, ts is dropped on return.
func(ts *Tensor) HardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HardshrinkBackwardGradInput wraps lib.AtgHardshrinkBackwardGradInput,
// writing into the caller's gradInput tensor. If del is true, ts is dropped
// (freed) when the call returns.
func(ts *Tensor) HardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOut.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HardshrinkOut wraps the C binding lib.AtgHardshrinkOut, writing into the
// caller's out tensor. If del is true, ts is dropped (freed) on return.
func(ts *Tensor) HardshrinkOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrinkOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Hardsigmoid(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoid(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Hardsigmoid_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoid_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) HardsigmoidBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) HardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) HardsigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hardswish wraps lib.AtgHardswish and returns the resulting tensor.
// If del is true, the receiver is dropped (freed) after the call returns.
func(ts *Tensor) Hardswish(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswish(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hardswish_ is the in-place variant: on success the receiver's ctensor is
// replaced by the handle returned through lib.AtgHardswish_.
func(ts *Tensor) Hardswish_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswish_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// HardswishBackward wraps lib.AtgHardswishBackward (gradOutput, receiver).
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HardswishBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswishBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HardswishBackwardOut wraps lib.AtgHardswishBackwardOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HardswishBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswishBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HardswishOut wraps lib.AtgHardswishOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HardswishOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswishOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hardtanh wraps lib.AtgHardtanh and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Hardtanh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hardtanh_ is the in-place variant: on success the receiver's ctensor is
// replaced by the handle returned through lib.AtgHardtanh_.
func(ts *Tensor) Hardtanh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// HardtanhBackward wraps lib.AtgHardtanhBackward (gradOutput, receiver, minVal, maxVal).
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HardtanhBackwardGradInput wraps lib.AtgHardtanhBackwardGradInput, writing into
// gradInput. If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HardtanhOut wraps lib.AtgHardtanhOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HardtanhOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Heaviside wraps lib.AtgHeaviside (receiver, values) and returns the resulting
// tensor. If del is true, the receiver is dropped (freed) after the call returns.
func(ts *Tensor) Heaviside(values *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHeaviside(ptr, ts.ctensor, values.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Heaviside_ is the in-place variant: on success the receiver's ctensor is
// replaced by the handle returned through lib.AtgHeaviside_.
func(ts *Tensor) Heaviside_(values *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHeaviside_(ptr, ts.ctensor, values.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// HeavisideOut wraps lib.AtgHeavisideOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HeavisideOut(out *Tensor, values *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHeavisideOut(ptr, out.ctensor, ts.ctensor, values.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HingeEmbeddingLoss wraps lib.AtgHingeEmbeddingLoss (receiver, target, margin,
// reduction). If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Histc wraps lib.AtgHistc (receiver, bins) and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Histc(bins int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHistc(ptr, ts.ctensor, bins)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HistcOut wraps lib.AtgHistcOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hspmm wraps lib.AtgHspmm (mat1, mat2) and returns the resulting tensor.
func Hspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HspmmOut wraps lib.AtgHspmmOut, writing into `out`.
func HspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hstack collects the C handles of `tensors` into a slice and passes them, with
// their count, to lib.AtgHstack; returns the resulting tensor.
func Hstack(tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgHstack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HstackOut is the variant of Hstack that writes into `out` via lib.AtgHstackOut.
func HstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgHstackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HuberLoss wraps lib.AtgHuberLoss (receiver, target, reduction, delta).
// If del is true, the receiver is dropped (freed) after the call returns.
func(ts *Tensor) HuberLoss(target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLoss(ptr, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HuberLossBackward wraps lib.AtgHuberLossBackward (gradOutput, receiver, target,
// reduction, delta). If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HuberLossBackwardOut wraps lib.AtgHuberLossBackwardOut, writing into gradInput.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// HuberLossOut wraps lib.AtgHuberLossOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hypot wraps lib.AtgHypot (receiver, other) and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Hypot(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHypot(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Hypot_ is the in-place variant: on success the receiver's ctensor is replaced
// by the handle returned through lib.AtgHypot_.
func(ts *Tensor) Hypot_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHypot_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// HypotOut wraps lib.AtgHypotOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) HypotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHypotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// I0 wraps lib.AtgI0 and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) I0(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgI0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// I0_ is the in-place variant: on success the receiver's ctensor is replaced by
// the handle returned through lib.AtgI0_.
func(ts *Tensor) I0_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgI0_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// I0Out wraps lib.AtgI0Out, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) I0Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgI0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Igamma wraps lib.AtgIgamma (receiver, other) and returns the resulting tensor.
// If del is true, the receiver is dropped (freed) after the call returns.
func(ts *Tensor) Igamma(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgamma(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Igamma_ is the in-place variant: on success the receiver's ctensor is replaced
// by the handle returned through lib.AtgIgamma_.
func(ts *Tensor) Igamma_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgamma_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// IgammaOut wraps lib.AtgIgammaOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IgammaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammaOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Igammac wraps lib.AtgIgammac (receiver, other) and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Igammac(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammac(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Igammac_ is the in-place variant: on success the receiver's ctensor is replaced
// by the handle returned through lib.AtgIgammac_.
func(ts *Tensor) Igammac_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammac_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// IgammacOut wraps lib.AtgIgammacOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IgammacOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammacOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Im2col wraps lib.AtgIm2col, passing each int64 slice together with its length
// (the C side expects pointer+count pairs). If del is true, the receiver is
// dropped after the call returns.
func(ts *Tensor) Im2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
dilationLen := len(dilation)
paddingLen := len(padding)
strideLen := len(stride)
lib.AtgIm2col(ptr, ts.ctensor, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Im2colOut is the variant of Im2col that writes into `out` via lib.AtgIm2colOut.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Im2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
dilationLen := len(dilation)
paddingLen := len(padding)
strideLen := len(stride)
lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Imag wraps lib.AtgImag and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Imag(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgImag(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexAdd wraps lib.AtgIndexAdd (receiver, dim, index, source) and returns the
// resulting tensor. If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexAdd_ is the in-place variant: on success the receiver's ctensor is
// replaced by the handle returned through lib.AtgIndexAdd_.
func(ts *Tensor) IndexAdd_(dim int64, index *Tensor, source *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexAddOut wraps lib.AtgIndexAddOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexAddOut(out *Tensor, dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexCopy wraps lib.AtgIndexCopy (receiver, dim, index, source).
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexCopy(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexCopy_ is the in-place variant: on success the receiver's ctensor is
// replaced by the handle returned through lib.AtgIndexCopy_.
func(ts *Tensor) IndexCopy_(dim int64, index *Tensor, source *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexCopyOut wraps lib.AtgIndexCopyOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexCopyOut(out *Tensor, dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexCopyOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexFill wraps lib.AtgIndexFill (receiver, dim, index, scalar value).
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexFill(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexFill_ is the in-place variant: on success the receiver's ctensor is
// replaced by the handle returned through lib.AtgIndexFill_.
func(ts *Tensor) IndexFill_(dim int64, index *Tensor, value *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexFillIntScalarOut wraps lib.AtgIndexFillIntScalarOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexFillIntScalarOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFillIntScalarOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexFillIntTensor is the tensor-valued overload of IndexFill (value is a
// *Tensor rather than a *Scalar). If del is true, the receiver is dropped after
// the call returns.
func(ts *Tensor) IndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFillIntTensor(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexFillIntTensor_ is the in-place variant: on success the receiver's ctensor
// is replaced by the handle returned through lib.AtgIndexFillIntTensor_.
func(ts *Tensor) IndexFillIntTensor_(dim int64, index *Tensor, value *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFillIntTensor_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexFillIntTensorOut wraps lib.AtgIndexFillIntTensorOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexFillIntTensorOut(out *Tensor, dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFillIntTensorOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexPutOut collects the C handles of `indices`, converts `accumulate` to a
// C int (0/1), and calls lib.AtgIndexPutOut writing into `out`. If del is true,
// the receiver is dropped (freed) after the call returns.
func(ts *Tensor) IndexPutOut(out *Tensor, indices []*Tensor, values *Tensor, accumulate bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
lib.AtgIndexPutOut(ptr, out.ctensor, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexReduce wraps lib.AtgIndexReduce, converting `includeSelf` to a C int (0/1).
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexReduce(dim int64, index *Tensor, source *Tensor, reduce string, includeSelf bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cincludeSelf := int32(0)
if includeSelf { cincludeSelf = int32(1) }
lib.AtgIndexReduce(ptr, ts.ctensor, dim, index.ctensor, source.ctensor, reduce, cincludeSelf)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexReduce_ is the in-place variant: on success the receiver's ctensor is
// replaced by the handle returned through lib.AtgIndexReduce_.
func(ts *Tensor) IndexReduce_(dim int64, index *Tensor, source *Tensor, reduce string, includeSelf bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cincludeSelf := int32(0)
if includeSelf { cincludeSelf = int32(1) }
lib.AtgIndexReduce_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor, reduce, cincludeSelf)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexReduceOut wraps lib.AtgIndexReduceOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexReduceOut(out *Tensor, dim int64, index *Tensor, source *Tensor, reduce string, includeSelf bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cincludeSelf := int32(0)
if includeSelf { cincludeSelf = int32(1) }
lib.AtgIndexReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor, reduce, cincludeSelf)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexSelect wraps lib.AtgIndexSelect (receiver, dim, index).
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexSelectBackward wraps lib.AtgIndexSelectBackward, passing selfSizes with
// its length (the C side expects a pointer+count pair).
func IndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
selfSizesLen := len(selfSizes)
lib.AtgIndexSelectBackward(ptr, grad.ctensor, selfSizes, selfSizesLen, dim, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexSelectOut wraps lib.AtgIndexSelectOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndexTensorOut collects the C handles of `indices` and calls
// lib.AtgIndexTensorOut, writing into `out`. If del is true, the receiver is
// dropped after the call returns.
func(ts *Tensor) IndexTensorOut(out *Tensor, indices []*Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cindices []lib.Ctensor
for _, t := range indices {cindices = append(cindices, t.ctensor)}
lib.AtgIndexTensorOut(ptr, out.ctensor, ts.ctensor, cindices, len(cindices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Indices wraps lib.AtgIndices and returns the resulting tensor.
// If del is true, the receiver is dropped (freed) after the call returns.
func(ts *Tensor) Indices(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndicesCopy wraps lib.AtgIndicesCopy and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndicesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndicesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IndicesCopyOut wraps lib.AtgIndicesCopyOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndicesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// InfinitelyDifferentiableGeluBackward wraps
// lib.AtgInfinitelyDifferentiableGeluBackward (grad, receiver).
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) InfinitelyDifferentiableGeluBackward(grad *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInfinitelyDifferentiableGeluBackward(ptr, grad.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Inner wraps lib.AtgInner (receiver, other) and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Inner(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInner(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// InnerOut wraps lib.AtgInnerOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) InnerOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInnerOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// InstanceNorm wraps lib.AtgInstanceNorm, converting the two bool flags
// (useInputStats, cudnnEnabled) to C ints (0/1).
func InstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cuseInputStats := int32(0)
if useInputStats { cuseInputStats = int32(1) }
ccudnnEnabled := int32(0)
if cudnnEnabled { ccudnnEnabled = int32(1) }
lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IntRepr wraps lib.AtgIntRepr and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IntRepr(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIntRepr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IntReprOut wraps lib.AtgIntReprOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) IntReprOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIntReprOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Inverse wraps lib.AtgInverse and returns the resulting tensor.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) Inverse(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInverse(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// InverseOut wraps lib.AtgInverseOut, writing into `out`.
// If del is true, the receiver is dropped after the call returns.
func(ts *Tensor) InverseOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
func(ts *Tensor) IsCoalesced(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsCoalesced(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
func(ts *Tensor) IsComplex(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsComplex(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
func(ts *Tensor) IsConj(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsConj(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
func(ts *Tensor) IsDistributed(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsDistributed(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
func(ts *Tensor) IsFloatingPoint(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsFloatingPoint(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsInference wraps lib.AtgIsInference, returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsInference(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsInference(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsLeaf wraps lib.AtgIsLeaf, returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsLeaf(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsLeaf(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsNeg wraps lib.AtgIsNeg, returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsNeg(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsNeg(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsNonzero wraps lib.AtgIsNonzero, returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsNonzero(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsNonzero(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsPinned wraps lib.AtgIsPinned for the given device (passed as its C int code), returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsPinned(device gotch.Device, del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsPinned(ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsSameSize wraps lib.AtgIsSameSize, comparing ts against other and returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsSameSize(other *Tensor, del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsSameSize(ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsSetTo wraps lib.AtgIsSetTo, comparing ts against tensor and returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsSetTo(tensor *Tensor, del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsSetTo(ts.ctensor, tensor.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsSigned wraps lib.AtgIsSigned, returning its bool result.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsSigned(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgIsSigned(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `bool`:
// --------------------------
// IsVulkanAvailable wraps lib.AtgIsVulkanAvailable, returning its bool result.
// It is a free function: it takes no tensor receiver.
func IsVulkanAvailable()(retVal bool, err error) {
retVal = lib.AtgIsVulkanAvailable()
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isclose wraps lib.AtgIsclose, comparing ts and other element-wise with the given
// rtol/atol tolerances; equalNan is converted to an int32 flag (0/1) for the C call.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Isclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cequalNan := int32(0)
if equalNan { cequalNan = int32(1) }
lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isfinite wraps lib.AtgIsfinite and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Isfinite(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsfinite(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isin wraps lib.AtgIsin, testing membership of elements in testElements.
// assumeUnique and invert are converted to int32 flags (0/1) for the C call.
func Isin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique { cassumeUnique = int32(1) }
cinvert := int32(0)
if invert { cinvert = int32(1) }
lib.AtgIsin(ptr, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsinScalarTensor wraps lib.AtgIsinScalarTensor: the scalar-element / tensor-test variant of Isin.
// assumeUnique and invert are converted to int32 flags (0/1) for the C call.
func IsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique { cassumeUnique = int32(1) }
cinvert := int32(0)
if invert { cinvert = int32(1) }
lib.AtgIsinScalarTensor(ptr, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsinScalarTensorOut wraps lib.AtgIsinScalarTensorOut: like IsinScalarTensor but writing into out.
// assumeUnique and invert are converted to int32 flags (0/1) for the C call.
func IsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique { cassumeUnique = int32(1) }
cinvert := int32(0)
if invert { cinvert = int32(1) }
lib.AtgIsinScalarTensorOut(ptr, out.ctensor, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsinTensorScalar wraps lib.AtgIsinTensorScalar: the tensor-elements / scalar-test variant of Isin.
// assumeUnique and invert are converted to int32 flags (0/1) for the C call.
func IsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique { cassumeUnique = int32(1) }
cinvert := int32(0)
if invert { cinvert = int32(1) }
lib.AtgIsinTensorScalar(ptr, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsinTensorScalarOut wraps lib.AtgIsinTensorScalarOut: like IsinTensorScalar but writing into out.
// assumeUnique and invert are converted to int32 flags (0/1) for the C call.
func IsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique { cassumeUnique = int32(1) }
cinvert := int32(0)
if invert { cinvert = int32(1) }
lib.AtgIsinTensorScalarOut(ptr, out.ctensor, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsinTensorTensorOut wraps lib.AtgIsinTensorTensorOut: like Isin but writing into out.
// assumeUnique and invert are converted to int32 flags (0/1) for the C call.
func IsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique { cassumeUnique = int32(1) }
cinvert := int32(0)
if invert { cinvert = int32(1) }
lib.AtgIsinTensorTensorOut(ptr, out.ctensor, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isinf wraps lib.AtgIsinf and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Isinf(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsinf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsinfOut wraps lib.AtgIsinfOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsinfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsinfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isnan wraps lib.AtgIsnan and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Isnan(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsnan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsnanOut wraps lib.AtgIsnanOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsnanOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsnanOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isneginf wraps lib.AtgIsneginf and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Isneginf(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsneginf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsneginfOut wraps lib.AtgIsneginfOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsneginfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsneginfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isposinf wraps lib.AtgIsposinf and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Isposinf(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsposinf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// IsposinfOut wraps lib.AtgIsposinfOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) IsposinfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsposinfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Isreal wraps lib.AtgIsreal and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Isreal(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsreal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Istft wraps lib.AtgIstft (inverse short-time Fourier transform binding).
// hopLength, winLength and length are optional int64 values encoded as slices:
// an empty slice means "unset" (the C call receives a null flag = 1); otherwise
// only the first element is used. The bool flags center/normalized/onesided/
// returnComplex are converted to int32 (0/1) for the C call.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Istft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var chopLengthVal int64 = 0
var chopLengthNull int = 1
if len(hopLength) > 0 {
chopLengthVal = hopLength[0]
chopLengthNull = 0
}
var cwinLengthVal int64 = 0
var cwinLengthNull int = 1
if len(winLength) > 0 {
cwinLengthVal = winLength[0]
cwinLengthNull = 0
}
ccenter := int32(0)
if center { ccenter = int32(1) }
cnormalized := int32(0)
if normalized { cnormalized = int32(1) }
conesided := int32(0)
if onesided { conesided = int32(1) }
var clengthVal int64 = 0
var clengthNull int = 1
if len(length) > 0 {
clengthVal = length[0]
clengthNull = 0
}
creturnComplex := int32(0)
if returnComplex { creturnComplex = int32(1) }
lib.AtgIstft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, cnormalized, conesided, clengthVal, clengthNull, creturnComplex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KaiserWindow wraps lib.AtgKaiserWindow, creating a new tensor of the given length
// with the requested dtype and device (passed as their C int codes).
func KaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgKaiserWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KaiserWindowBeta wraps lib.AtgKaiserWindowBeta: KaiserWindow with explicit periodic
// flag (converted to int32 0/1) and beta parameter.
func KaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgKaiserWindowBeta(ptr, windowLength, cperiodic, beta, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KaiserWindowBetaOut wraps lib.AtgKaiserWindowBetaOut: like KaiserWindowBeta but writing into out.
func KaiserWindowBetaOut(out *Tensor, windowLength int64, periodic bool, beta float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgKaiserWindowBetaOut(ptr, out.ctensor, windowLength, cperiodic, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KaiserWindowOut wraps lib.AtgKaiserWindowOut: like KaiserWindow but writing into out.
func KaiserWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgKaiserWindowOut(ptr, out.ctensor, windowLength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KaiserWindowPeriodic wraps lib.AtgKaiserWindowPeriodic: KaiserWindow with an explicit
// periodic flag (converted to int32 0/1).
func KaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgKaiserWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KaiserWindowPeriodicOut wraps lib.AtgKaiserWindowPeriodicOut: like KaiserWindowPeriodic
// but writing into out.
func KaiserWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic { cperiodic = int32(1) }
lib.AtgKaiserWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KlDiv wraps lib.AtgKlDiv against target with the given reduction code;
// logTarget is converted to an int32 flag (0/1) for the C call.
// If del is true, ts is dropped after the call.
func(ts *Tensor) KlDiv(target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
clogTarget := int32(0)
if logTarget { clogTarget = int32(1) }
lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction, clogTarget)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Kron wraps lib.AtgKron with other and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Kron(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgKron(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// KronOut wraps lib.AtgKronOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) KronOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgKronOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Kthvalue wraps lib.AtgKthvalue, which returns two tensors (values and indices).
// ctensorPtr1 is derived by pointer arithmetic: it points one Ctensor-slot past
// ctensorPtr0, where the C call is expected to write the second handle
// (generated pattern for multi-tensor returns).
// keepdim is converted to an int32 flag (0/1). If del is true, ts is dropped after the call.
func(ts *Tensor) Kthvalue(k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgKthvalue(ctensorPtr0, ts.ctensor, k, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// KthvalueValues wraps lib.AtgKthvalueValues: like Kthvalue but writing into the
// caller-supplied values and indices tensors. ctensorPtr1 points one Ctensor-slot
// past ctensorPtr0, where the C call is expected to write the second handle.
// keepdim is converted to an int32 flag (0/1). If del is true, ts is dropped after the call.
func(ts *Tensor) KthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgKthvalueValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// L1Loss wraps lib.AtgL1Loss against target with the given reduction code.
// If del is true, ts is dropped after the call.
func(ts *Tensor) L1Loss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LayerNorm wraps lib.AtgLayerNorm. normalizedShape is passed to C together with its
// length; cudnnEnable is converted to an int32 flag (0/1).
func LayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
normalizedShapeLen := len(normalizedShape)
ccudnnEnable := int32(0)
if cudnnEnable { ccudnnEnable = int32(1) }
lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, normalizedShapeLen, weight.ctensor, bias.ctensor, eps, ccudnnEnable)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lcm wraps lib.AtgLcm with other and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Lcm(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLcm(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lcm_ wraps lib.AtgLcm_ (in-place variant): ts.ctensor is replaced with the
// handle written by the C call.
func(ts *Tensor) Lcm_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLcm_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LcmOut wraps lib.AtgLcmOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LcmOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLcmOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ldexp wraps lib.AtgLdexp with other and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Ldexp(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLdexp(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ldexp_ wraps lib.AtgLdexp_ (in-place variant): ts.ctensor is replaced with the
// handle written by the C call.
func(ts *Tensor) Ldexp_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLdexp_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LdexpOut wraps lib.AtgLdexpOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LdexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLdexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Le wraps lib.AtgLe with a scalar operand and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Le(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLe(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Le_ wraps lib.AtgLe_ (in-place scalar variant): ts.ctensor is replaced with the
// handle written by the C call.
func(ts *Tensor) Le_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLe_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LeScalarOut wraps lib.AtgLeScalarOut, writing the scalar comparison result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LeTensor wraps lib.AtgLeTensor with a tensor operand and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LeTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LeTensor_ wraps lib.AtgLeTensor_ (in-place tensor variant): ts.ctensor is replaced
// with the handle written by the C call.
func(ts *Tensor) LeTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LeTensorOut wraps lib.AtgLeTensorOut, writing the tensor comparison result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LeakyRelu wraps lib.AtgLeakyRelu and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LeakyRelu(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeakyRelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LeakyRelu_ wraps lib.AtgLeakyRelu_ (in-place variant): ts.ctensor is replaced with
// the handle written by the C call.
func(ts *Tensor) LeakyRelu_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeakyRelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LeakyReluBackward wraps lib.AtgLeakyReluBackward; note the C call takes gradOutput
// first and ts second. selfIsResult is converted to an int32 flag (0/1).
// If del is true, ts is dropped after the call.
func(ts *Tensor) LeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cselfIsResult := int32(0)
if selfIsResult { cselfIsResult = int32(1) }
lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LeakyReluBackwardGradInput wraps lib.AtgLeakyReluBackwardGradInput: like
// LeakyReluBackward but writing into gradInput. selfIsResult is converted to an
// int32 flag (0/1). If del is true, ts is dropped after the call.
func(ts *Tensor) LeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cselfIsResult := int32(0)
if selfIsResult { cselfIsResult = int32(1) }
lib.AtgLeakyReluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LeakyReluOut wraps lib.AtgLeakyReluOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LeakyReluOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lerp wraps lib.AtgLerp with end and a scalar weight, returning the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Lerp(end *Tensor, weight *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lerp_ wraps lib.AtgLerp_ (in-place variant): ts.ctensor is replaced with the
// handle written by the C call.
func(ts *Tensor) Lerp_(end *Tensor, weight *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LerpScalarOut wraps lib.AtgLerpScalarOut, writing the scalar-weight lerp result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpScalarOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LerpTensor wraps lib.AtgLerpTensor with end and a tensor weight, returning the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LerpTensor(end *Tensor, weight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpTensor(ptr, ts.ctensor, end.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LerpTensor_ wraps lib.AtgLerpTensor_ (in-place tensor-weight variant): ts.ctensor
// is replaced with the handle written by the C call.
func(ts *Tensor) LerpTensor_(end *Tensor, weight *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpTensor_(ptr, ts.ctensor, end.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LerpTensorOut wraps lib.AtgLerpTensorOut, writing the tensor-weight lerp result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpTensorOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Less wraps lib.AtgLess with a scalar operand and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Less(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLess(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Less_ wraps lib.AtgLess_ (in-place scalar variant): ts.ctensor is replaced with
// the handle written by the C call.
func(ts *Tensor) Less_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLess_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LessEqual wraps lib.AtgLessEqual with a scalar operand and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LessEqual(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqual(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LessEqual_ wraps lib.AtgLessEqual_ (in-place scalar variant): ts.ctensor is
// replaced with the handle written by the C call.
func(ts *Tensor) LessEqual_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqual_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LessEqualScalarOut wraps lib.AtgLessEqualScalarOut, writing the scalar comparison result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LessEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LessEqualTensor wraps lib.AtgLessEqualTensor with a tensor operand, returning the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LessEqualTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LessEqualTensor_ wraps lib.AtgLessEqualTensor_ (in-place tensor variant):
// ts.ctensor is replaced with the handle written by the C call.
func(ts *Tensor) LessEqualTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LessEqualTensorOut wraps lib.AtgLessEqualTensorOut, writing the tensor comparison result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LessEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LessScalarOut wraps lib.AtgLessScalarOut, writing the scalar comparison result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LessScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LessTensor wraps lib.AtgLessTensor with a tensor operand, returning the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LessTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LessTensor_ wraps lib.AtgLessTensor_ (in-place tensor variant): ts.ctensor is
// replaced with the handle written by the C call.
func(ts *Tensor) LessTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LessTensorOut wraps lib.AtgLessTensorOut, writing the tensor comparison result into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LessTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lgamma wraps lib.AtgLgamma and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Lgamma(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLgamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lgamma_ wraps lib.AtgLgamma_ (in-place variant): ts.ctensor is replaced with the
// handle written by the C call.
func(ts *Tensor) Lgamma_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLgamma_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LgammaOut wraps lib.AtgLgammaOut, writing the result into out and returning it as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) LgammaOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lift wraps lib.AtgLift and returns the result as a new Tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Lift(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLift(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LiftFresh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLiftFresh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LiftFreshCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLiftFreshCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LiftFreshCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLiftFreshCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LiftOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLiftOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCholesky — binding to lib.AtgLinalgCholesky. The Go bool `upper` is
// converted to a C int flag (0/1). del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCholesky(upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgLinalgCholesky(ptr, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgCholeskyEx — binding to lib.AtgLinalgCholeskyEx returning two tensors.
// The C call writes two handles into consecutive slots; ctensorPtr1 is derived
// from ctensorPtr0 by pointer arithmetic. del: drop the receiver on return.
func(ts *Tensor) LinalgCholeskyEx(upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper { cupper = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgCholeskyEx(ctensorPtr0, ts.ctensor, cupper, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgCholeskyExL — out-variant binding to lib.AtgLinalgCholeskyExL (takes
// pre-existing `l`/`info` tensors) returning two tensors. del: drop the
// receiver on return.
func(ts *Tensor) LinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper { cupper = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgCholeskyExL(ctensorPtr0, l.ctensor, info.ctensor, ts.ctensor, cupper, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCholeskyOut — out-variant binding to lib.AtgLinalgCholeskyOut.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper { cupper = int32(1) }
lib.AtgLinalgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCond — binding to lib.AtgLinalgCond with a scalar order `p`.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCond(p *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCond(ptr, ts.ctensor, p.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCondOut — out-variant binding to lib.AtgLinalgCondOut.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCondOut(out *Tensor, p *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCondOut(ptr, out.ctensor, ts.ctensor, p.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCondPStr — binding to lib.AtgLinalgCondPStr taking the order `p` as a
// string. del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCondPStr(p string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCondPStr(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCondPStrOut — out-variant binding to lib.AtgLinalgCondPStrOut.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCondPStrOut(out *Tensor, p string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCondPStrOut(ptr, out.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCross — binding to lib.AtgLinalgCross along dimension `dim`.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCross(other *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCross(ptr, ts.ctensor, other.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgCrossOut — out-variant binding to lib.AtgLinalgCrossOut.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgCrossOut(out *Tensor, other *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgDet — package-level binding to lib.AtgLinalgDet on tensor `a`.
func LinalgDet(a *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgDet(ptr, a.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgDetOut — package-level out-variant binding to lib.AtgLinalgDetOut.
func LinalgDetOut(out *Tensor, a *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgDetOut(ptr, out.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgDiagonal — package-level binding to lib.AtgLinalgDiagonal with the
// given offset and dimension pair.
func LinalgDiagonal(a *Tensor, offset int64, dim1 int64, dim2 int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgDiagonal(ptr, a.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgEig — binding to lib.AtgLinalgEig returning two tensors; the C call
// writes both handles into consecutive slots starting at ctensorPtr0.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgEig(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEig(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgEigOut — out-variant binding to lib.AtgLinalgEigOut (pre-existing
// eigenvalues/eigenvectors tensors) returning two tensors. del: drop the
// receiver when the call returns.
func(ts *Tensor) LinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEigOut(ctensorPtr0, eigenvalues.ctensor, eigenvectors.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgEigh — binding to lib.AtgLinalgEigh returning two tensors; uPLO is
// passed through as a string. del: drop the receiver when the call returns.
func(ts *Tensor) LinalgEigh(uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEigh(ctensorPtr0, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgEighEigvals — out-variant binding to lib.AtgLinalgEighEigvals
// (pre-existing eigvals/eigvecs tensors) returning two tensors. del: drop the
// receiver when the call returns.
func(ts *Tensor) LinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEighEigvals(ctensorPtr0, eigvals.ctensor, eigvecs.ctensor, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgEigvals — binding to lib.AtgLinalgEigvals; returns the result as a new
// *Tensor. del: drop the receiver when the call returns.
func(ts *Tensor) LinalgEigvals(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvals(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgEigvalsOut — out-variant binding to lib.AtgLinalgEigvalsOut.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgEigvalsOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvalsOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgEigvalsh — binding to lib.AtgLinalgEigvalsh; uPLO is passed through as
// a string. del: drop the receiver when the call returns.
func(ts *Tensor) LinalgEigvalsh(uPLO string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvalsh(ptr, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgEigvalshOut — out-variant binding to lib.AtgLinalgEigvalshOut.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgEigvalshOut(out *Tensor, uPLO string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvalshOut(ptr, out.ctensor, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgHouseholderProduct — package-level binding to
// lib.AtgLinalgHouseholderProduct over `input` and `tau`.
func LinalgHouseholderProduct(input *Tensor, tau *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgHouseholderProduct(ptr, input.ctensor, tau.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgHouseholderProductOut — package-level out-variant binding to
// lib.AtgLinalgHouseholderProductOut.
func LinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgHouseholderProductOut(ptr, out.ctensor, input.ctensor, tau.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgInv — package-level binding to lib.AtgLinalgInv on tensor `a`.
func LinalgInv(a *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgInv(ptr, a.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgInvEx — package-level binding to lib.AtgLinalgInvEx returning two
// tensors written into consecutive slots starting at ctensorPtr0.
func LinalgInvEx(a *Tensor, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgInvEx(ctensorPtr0, a.ctensor, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgInvExInverse — package-level out-variant binding to
// lib.AtgLinalgInvExInverse (pre-existing inverse/info tensors), two results.
func LinalgInvExInverse(inverse *Tensor, info *Tensor, a *Tensor, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgInvExInverse(ctensorPtr0, inverse.ctensor, info.ctensor, a.ctensor, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgInvOut — package-level out-variant binding to lib.AtgLinalgInvOut.
func LinalgInvOut(out *Tensor, a *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgInvOut(ptr, out.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLdlFactor — binding to lib.AtgLinalgLdlFactor returning two tensors
// written into consecutive slots starting at ctensorPtr0. del: drop the
// receiver when the call returns.
func(ts *Tensor) LinalgLdlFactor(hermitian bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgLdlFactor(ctensorPtr0, ts.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLdlFactorEx — binding to lib.AtgLinalgLdlFactorEx returning three
// tensors; slots 1 and 2 are derived from slot 0 by pointer arithmetic.
// del: drop the receiver when the call returns.
func(ts *Tensor) LinalgLdlFactorEx(hermitian bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgLdlFactorEx(ctensorPtr0, ts.ctensor, chermitian, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLdlFactorExOut — out-variant binding to lib.AtgLinalgLdlFactorExOut
// (pre-existing lD/pivots/info tensors) returning three tensors. del: drop the
// receiver when the call returns.
func(ts *Tensor) LinalgLdlFactorExOut(lD *Tensor, pivots *Tensor, info *Tensor, hermitian bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgLdlFactorExOut(ctensorPtr0, lD.ctensor, pivots.ctensor, info.ctensor, ts.ctensor, chermitian, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLdlFactorOut — out-variant binding to lib.AtgLinalgLdlFactorOut
// (pre-existing lD/pivots tensors) returning two tensors. del: drop the
// receiver when the call returns.
func(ts *Tensor) LinalgLdlFactorOut(lD *Tensor, pivots *Tensor, hermitian bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgLdlFactorOut(ctensorPtr0, lD.ctensor, pivots.ctensor, ts.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgLdlSolve — package-level binding to lib.AtgLinalgLdlSolve over the
// lD/pivots factorization tensors and right-hand side `b`.
func LinalgLdlSolve(lD *Tensor, pivots *Tensor, b *Tensor, hermitian bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgLdlSolve(ptr, lD.ctensor, pivots.ctensor, b.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgLdlSolveOut — package-level out-variant binding to
// lib.AtgLinalgLdlSolveOut.
func LinalgLdlSolveOut(out *Tensor, lD *Tensor, pivots *Tensor, b *Tensor, hermitian bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgLdlSolveOut(ptr, out.ctensor, lD.ctensor, pivots.ctensor, b.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLstsq — binding to lib.AtgLinalgLstsq returning four tensors.
// rcond is an optional float64: an empty slice means "null" (crcondNull=1);
// otherwise only rcond[0] is used. del: drop the receiver when the call returns.
func(ts *Tensor) LinalgLstsq(b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
var crcondVal float64 = 0.0
var crcondNull int = 1
if len(rcond) > 0 {
crcondVal = rcond[0]
crcondNull = 0
}
lib.AtgLinalgLstsq(ctensorPtr0, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLstsqOut — out-variant binding to lib.AtgLinalgLstsqOut (pre-existing
// solution/residuals/rank/singularValues tensors) returning four tensors.
// rcond: optional — empty slice means "null". del: drop the receiver on return.
func(ts *Tensor) LinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
var crcondVal float64 = 0.0
var crcondNull int = 1
if len(rcond) > 0 {
crcondVal = rcond[0]
crcondNull = 0
}
lib.AtgLinalgLstsqOut(ctensorPtr0, solution.ctensor, residuals.ctensor, rank.ctensor, singularValues.ctensor, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLu — package-level binding to lib.AtgLinalgLu returning three tensors
// written into consecutive slots starting at ctensorPtr0.
func LinalgLu(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot { cpivot = int32(1) }
lib.AtgLinalgLu(ctensorPtr0, a.ctensor, cpivot)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLuFactor — package-level binding to lib.AtgLinalgLuFactor returning
// two tensors.
func LinalgLuFactor(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot { cpivot = int32(1) }
lib.AtgLinalgLuFactor(ctensorPtr0, a.ctensor, cpivot)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLuFactorEx — package-level binding to lib.AtgLinalgLuFactorEx
// returning three tensors; pivot/checkErrors are passed as C int flags.
func LinalgLuFactorEx(a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot { cpivot = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgLuFactorEx(ctensorPtr0, a.ctensor, cpivot, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLuFactorExOut — package-level out-variant binding to
// lib.AtgLinalgLuFactorExOut (pre-existing lU/pivots/info tensors), three
// results.
func LinalgLuFactorExOut(lU *Tensor, pivots *Tensor, info *Tensor, a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot { cpivot = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgLuFactorExOut(ctensorPtr0, lU.ctensor, pivots.ctensor, info.ctensor, a.ctensor, cpivot, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLuFactorOut — package-level out-variant binding to
// lib.AtgLinalgLuFactorOut (pre-existing lU/pivots tensors), two results.
func LinalgLuFactorOut(lU *Tensor, pivots *Tensor, a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot { cpivot = int32(1) }
lib.AtgLinalgLuFactorOut(ctensorPtr0, lU.ctensor, pivots.ctensor, a.ctensor, cpivot)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgLuOut — package-level out-variant binding to lib.AtgLinalgLuOut
// (pre-existing p/l/u tensors), three results.
func LinalgLuOut(p *Tensor, l *Tensor, u *Tensor, a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot { cpivot = int32(1) }
lib.AtgLinalgLuOut(ctensorPtr0, p.ctensor, l.ctensor, u.ctensor, a.ctensor, cpivot)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgLuSolve — package-level binding to lib.AtgLinalgLuSolve over the
// lU/pivots factorization and right-hand side `b`; left/adjoint are C int
// flags.
func LinalgLuSolve(lU *Tensor, pivots *Tensor, b *Tensor, left bool, adjoint bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left { cleft = int32(1) }
cadjoint := int32(0)
if adjoint { cadjoint = int32(1) }
lib.AtgLinalgLuSolve(ptr, lU.ctensor, pivots.ctensor, b.ctensor, cleft, cadjoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgLuSolveOut — package-level out-variant binding to
// lib.AtgLinalgLuSolveOut.
func LinalgLuSolveOut(out *Tensor, lU *Tensor, pivots *Tensor, b *Tensor, left bool, adjoint bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left { cleft = int32(1) }
cadjoint := int32(0)
if adjoint { cadjoint = int32(1) }
lib.AtgLinalgLuSolveOut(ptr, out.ctensor, lU.ctensor, pivots.ctensor, b.ctensor, cleft, cadjoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LinalgMatmul(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LinalgMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LinalgMatrixExp(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatrixExp(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LinalgMatrixExpOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatrixExpOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LinalgMatrixPower(n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatrixPower(ptr, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LinalgMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LinalgMatrixRank(tol float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRank(ptr, ts.ctensor, tol, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMatrixRankAtolRtolFloat wraps lib.AtgLinalgMatrixRankAtolRtolFloat.
// atol/rtol are optional scalar tolerances: an empty slice encodes "null"
// (the cXxxNull flag is 1), otherwise element 0 is passed. If del is true
// the receiver is dropped when this call returns.
// NOTE(review): the result handle is written into a C.malloc(0) buffer,
// which relies on allocator slack — confirm against the binding generator.
func(ts *Tensor) LinalgMatrixRankAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
// bool -> int32 flag expected by the C API.
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRankAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMatrixRankAtolRtolFloatOut is the out-variant of
// LinalgMatrixRankAtolRtolFloat: the result is written into `out`.
func(ts *Tensor) LinalgMatrixRankAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRankAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMatrixRankAtolRtolTensor is the free-function variant taking the
// tolerances as tensors instead of optional floats.
func LinalgMatrixRankAtolRtolTensor(input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRankAtolRtolTensor(ptr, input.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMatrixRankAtolRtolTensorOut: as LinalgMatrixRankAtolRtolTensor but
// writing the result into `out`.
func LinalgMatrixRankAtolRtolTensorOut(out *Tensor, input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRankAtolRtolTensorOut(ptr, out.ctensor, input.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMatrixRankOut wraps lib.AtgLinalgMatrixRankOut with a fixed float
// tolerance `tol`; the result is written into `out`. If del is true the
// receiver is dropped on return.
func(ts *Tensor) LinalgMatrixRankOut(out *Tensor, tol float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRankOut(ptr, out.ctensor, ts.ctensor, tol, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMatrixRankOutTolTensor: tolerance supplied as a tensor, result
// written into `out`.
func LinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRankOutTolTensor(ptr, out.ctensor, input.ctensor, tol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMatrixRankTolTensor: tolerance supplied as a tensor, new result
// tensor returned.
func LinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgMatrixRankTolTensor(ptr, input.ctensor, tol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMultiDot wraps lib.AtgLinalgMultiDot: the Go tensor slice is
// flattened into a []lib.Ctensor handle slice before the C call.
func LinalgMultiDot(tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgLinalgMultiDot(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgMultiDotOut: as LinalgMultiDot but writing the result into `out`.
func LinalgMultiDotOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgLinalgMultiDotOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgNorm wraps lib.AtgLinalgNorm with a scalar `ord`; `dim` is passed
// as pointer+length. If del is true the receiver is dropped on return.
func(ts *Tensor) LinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgLinalgNorm(ptr, ts.ctensor, ord.cscalar, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgNormOrdStr: `ord` given as a string (e.g. "fro") instead of a
// scalar.
func(ts *Tensor) LinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgLinalgNormOrdStr(ptr, ts.ctensor, ord, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgNormOrdStrOut: string-`ord` variant writing the result into `out`.
func(ts *Tensor) LinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgLinalgNormOrdStrOut(ptr, out.ctensor, ts.ctensor, ord, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgNormOut: scalar-`ord` variant writing the result into `out`.
func(ts *Tensor) LinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgLinalgNormOut(ptr, out.ctensor, ts.ctensor, ord.cscalar, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinv wraps lib.AtgLinalgPinv (pseudo-inverse with float `rcond`
// cutoff). If del is true the receiver is dropped on return.
// NOTE(review): the result handle is written into a C.malloc(0) buffer,
// which relies on allocator slack — confirm against the binding generator.
func(ts *Tensor) LinalgPinv(rcond float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinv(ptr, ts.ctensor, rcond, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinvAtolRtolFloat: pseudo-inverse with optional atol/rtol floats
// (empty slice encodes null, otherwise element 0 is used).
func(ts *Tensor) LinalgPinvAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinvAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinvAtolRtolFloatOut: as LinalgPinvAtolRtolFloat but writing the
// result into `out`.
func(ts *Tensor) LinalgPinvAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinvAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinvAtolRtolTensor: tolerances supplied as tensors.
func(ts *Tensor) LinalgPinvAtolRtolTensor(atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinvAtolRtolTensor(ptr, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinvAtolRtolTensorOut: tensor-tolerance variant writing into `out`.
func(ts *Tensor) LinalgPinvAtolRtolTensorOut(out *Tensor, atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinvAtolRtolTensorOut(ptr, out.ctensor, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinvOut: float-`rcond` variant writing the result into `out`.
func(ts *Tensor) LinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinvOut(ptr, out.ctensor, ts.ctensor, rcond, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinvOutRcondTensor: `rcond` as a tensor, result written into `out`.
func(ts *Tensor) LinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinvOutRcondTensor(ptr, out.ctensor, ts.ctensor, rcond.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgPinvRcondTensor: `rcond` as a tensor, new result tensor returned.
func(ts *Tensor) LinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian { chermitian = int32(1) }
lib.AtgLinalgPinvRcondTensor(ptr, ts.ctensor, rcond.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgQr wraps lib.AtgLinalgQr (QR decomposition of `a`; `mode` is
// forwarded verbatim to the C API) and returns two new tensors.
//
// The C call writes two consecutive Ctensor handles starting at
// ctensorPtr0, so the backing buffer must hold two pointer-sized slots.
// The previous `C.malloc(0)` was undefined behavior that only worked
// through allocator slack; allocate the real size instead.
func LinalgQr(a *Tensor, mode string)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgQr(ctensorPtr0, a.ctensor, mode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgQrOut wraps lib.AtgLinalgQrOut: QR decomposition of `a` written
// into the pre-allocated `q` and `r`; `mode` is forwarded verbatim.
//
// The C call writes two consecutive Ctensor handles starting at
// ctensorPtr0, so allocate two pointer-sized slots rather than the
// previous `C.malloc(0)` (undefined behavior relying on allocator slack).
func LinalgQrOut(q *Tensor, r *Tensor, a *Tensor, mode string)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgQrOut(ctensorPtr0, q.ctensor, r.ctensor, a.ctensor, mode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgSlogdet wraps lib.AtgLinalgSlogdet, returning two new tensors.
//
// The C call writes two consecutive Ctensor handles starting at
// ctensorPtr0, so allocate two pointer-sized slots rather than the
// previous `C.malloc(0)` (undefined behavior relying on allocator slack).
func LinalgSlogdet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgSlogdet(ctensorPtr0, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgSlogdetOut wraps lib.AtgLinalgSlogdetOut, writing into the
// pre-allocated `sign` and `logabsdet` tensors.
//
// The C call writes two consecutive Ctensor handles starting at
// ctensorPtr0, so allocate two pointer-sized slots rather than the
// previous `C.malloc(0)` (undefined behavior relying on allocator slack).
func LinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgSlogdetOut(ctensorPtr0, sign.ctensor, logabsdet.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgSolve wraps lib.AtgLinalgSolve: solves for X in AX=B (or XA=B
// when `left` is false; the bool is lowered to an int32 flag).
func LinalgSolve(a *Tensor, b *Tensor, left bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left { cleft = int32(1) }
lib.AtgLinalgSolve(ptr, a.ctensor, b.ctensor, cleft)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgSolveEx wraps lib.AtgLinalgSolveEx, returning two new tensors;
// `left` and `checkErrors` are lowered to int32 flags.
//
// The C call writes two consecutive Ctensor handles starting at
// ctensorPtr0, so allocate two pointer-sized slots rather than the
// previous `C.malloc(0)` (undefined behavior relying on allocator slack).
func LinalgSolveEx(a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cleft := int32(0)
if left { cleft = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgSolveEx(ctensorPtr0, a.ctensor, b.ctensor, cleft, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgSolveExOut wraps lib.AtgLinalgSolveExOut, writing into the
// pre-allocated `result` and `info` tensors.
//
// The C call writes two consecutive Ctensor handles starting at
// ctensorPtr0, so allocate two pointer-sized slots rather than the
// previous `C.malloc(0)` (undefined behavior relying on allocator slack).
func LinalgSolveExOut(result *Tensor, info *Tensor, a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cleft := int32(0)
if left { cleft = int32(1) }
ccheckErrors := int32(0)
if checkErrors { ccheckErrors = int32(1) }
lib.AtgLinalgSolveExOut(ctensorPtr0, result.ctensor, info.ctensor, a.ctensor, b.ctensor, cleft, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgSolveOut: as LinalgSolve but writing the result into `out`.
func LinalgSolveOut(out *Tensor, a *Tensor, b *Tensor, left bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left { cleft = int32(1) }
lib.AtgLinalgSolveOut(ptr, out.ctensor, a.ctensor, b.ctensor, cleft)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgSolveTriangular wraps lib.AtgLinalgSolveTriangular; the three
// bools are lowered to int32 flags. If del is true the receiver is
// dropped on return.
func(ts *Tensor) LinalgSolveTriangular(b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper { cupper = int32(1) }
cleft := int32(0)
if left { cleft = int32(1) }
cunitriangular := int32(0)
if unitriangular { cunitriangular = int32(1) }
lib.AtgLinalgSolveTriangular(ptr, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgSolveTriangularOut: as LinalgSolveTriangular but writing into
// `out`.
func(ts *Tensor) LinalgSolveTriangularOut(out *Tensor, b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper { cupper = int32(1) }
cleft := int32(0)
if left { cleft = int32(1) }
cunitriangular := int32(0)
if unitriangular { cunitriangular = int32(1) }
lib.AtgLinalgSolveTriangularOut(ptr, out.ctensor, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgSvd wraps lib.AtgLinalgSvd, returning three new tensors; `driver`
// is forwarded verbatim to the C API.
//
// The C call writes three consecutive Ctensor handles starting at
// ctensorPtr0, so allocate three pointer-sized slots rather than the
// previous `C.malloc(0)` (undefined behavior relying on allocator slack).
func LinalgSvd(a *Tensor, fullMatrices bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cfullMatrices := int32(0)
if fullMatrices { cfullMatrices = int32(1) }
lib.AtgLinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LinalgSvdU wraps lib.AtgLinalgSvdU, writing into the pre-allocated `u`,
// `s`, and `vh` tensors.
//
// The C call writes three consecutive Ctensor handles starting at
// ctensorPtr0, so allocate three pointer-sized slots rather than the
// previous `C.malloc(0)` (undefined behavior relying on allocator slack).
func LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(3 * unsafe.Sizeof(uintptr(0))))))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cfullMatrices := int32(0)
if fullMatrices { cfullMatrices = int32(1) }
lib.AtgLinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgSvdvals wraps lib.AtgLinalgSvdvals; `driver` is forwarded
// verbatim to the C API.
func LinalgSvdvals(a *Tensor, driver string)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgSvdvals(ptr, a.ctensor, driver)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgSvdvalsOut: as LinalgSvdvals but writing the result into `out`.
func LinalgSvdvalsOut(out *Tensor, a *Tensor, driver string)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgSvdvalsOut(ptr, out.ctensor, a.ctensor, driver)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgTensorinv wraps lib.AtgLinalgTensorinv with index count `ind`.
// If del is true the receiver is dropped on return.
func(ts *Tensor) LinalgTensorinv(ind int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgTensorinv(ptr, ts.ctensor, ind)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgTensorinvOut: as LinalgTensorinv but writing into `out`.
func(ts *Tensor) LinalgTensorinvOut(out *Tensor, ind int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgTensorinvOut(ptr, out.ctensor, ts.ctensor, ind)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgTensorsolve wraps lib.AtgLinalgTensorsolve; `dims` is passed as
// pointer+length.
func(ts *Tensor) LinalgTensorsolve(other *Tensor, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimsLen := len(dims)
lib.AtgLinalgTensorsolve(ptr, ts.ctensor, other.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgTensorsolveOut: as LinalgTensorsolve but writing into `out`.
func(ts *Tensor) LinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimsLen := len(dims)
lib.AtgLinalgTensorsolveOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgVander wraps lib.AtgLinalgVander. `n` is an optional int64: an
// empty slice encodes null (cnNull=1), otherwise element 0 is passed.
func LinalgVander(x *Tensor, n []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgLinalgVander(ptr, x.ctensor, cnVal, cnNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgVecdot wraps lib.AtgLinalgVecdot over dimension `dim`.
func LinalgVecdot(x *Tensor, y *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgVecdot(ptr, x.ctensor, y.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinalgVecdotOut: as LinalgVecdot but writing the result into `out`.
func LinalgVecdotOut(out *Tensor, x *Tensor, y *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgVecdotOut(ptr, out.ctensor, x.ctensor, y.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Linear wraps lib.AtgLinear (input, weight, bias handles forwarded).
func Linear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinearOut: as Linear but writing the result into `out`.
func LinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinearOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Linspace wraps lib.AtgLinspace, creating a new tensor with the given
// kind/device options (lowered via CInt()).
func Linspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LinspaceOut: as Linspace but writing into `out` (options come from
// `out` on the C side, so none are passed here).
func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log wraps lib.AtgLog. If del is true the receiver is dropped on return.
// In-place variants (trailing underscore) replace ts.ctensor with the
// handle the C call produced instead of returning a new *Tensor.
func(ts *Tensor) Log(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log10 wraps lib.AtgLog10.
func(ts *Tensor) Log10(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog10(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log10_ is the in-place variant of Log10.
func(ts *Tensor) Log10_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog10_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Log10Out: as Log10 but writing the result into `out`.
func(ts *Tensor) Log10Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log1p wraps lib.AtgLog1p.
func(ts *Tensor) Log1p(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog1p(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log1p_ is the in-place variant of Log1p.
func(ts *Tensor) Log1p_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog1p_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Log1pOut: as Log1p but writing the result into `out`.
func(ts *Tensor) Log1pOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log2 wraps lib.AtgLog2.
func(ts *Tensor) Log2(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog2(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log2_ is the in-place variant of Log2.
func(ts *Tensor) Log2_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog2_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Log2Out: as Log2 but writing the result into `out`.
func(ts *Tensor) Log2Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Log_ is the in-place variant of Log.
func(ts *Tensor) Log_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LogNormal wraps lib.AtgLogNormal with distribution parameters
// `mean`/`std`. If del is true the receiver is dropped on return.
func(ts *Tensor) LogNormal(mean float64, std float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogNormal(ptr, ts.ctensor, mean, std)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogNormal_ is the in-place variant of LogNormal: ts.ctensor is replaced
// with the handle produced by the C call.
func(ts *Tensor) LogNormal_(mean float64, std float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogNormal_(ptr, ts.ctensor, mean, std)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LogNormalOut: as LogNormal but writing the result into `out`.
func(ts *Tensor) LogNormalOut(out *Tensor, mean float64, std float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogNormalOut(ptr, out.ctensor, ts.ctensor, mean, std)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogOut: as Log but writing the result into `out`.
func(ts *Tensor) LogOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogSigmoid wraps lib.AtgLogSigmoid. If del is true the receiver is
// dropped on return.
func(ts *Tensor) LogSigmoid(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoid(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogSigmoidBackward wraps lib.AtgLogSigmoidBackward; note the C call
// takes gradOutput first, then the receiver, then `buffer`.
func(ts *Tensor) LogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogSigmoidBackwardGradInput: as LogSigmoidBackward but writing into the
// pre-allocated `gradInput`.
func(ts *Tensor) LogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogSigmoidOut: as LogSigmoid but writing the result into `out`.
func(ts *Tensor) LogSigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogSoftmax wraps lib.AtgLogSoftmax over dimension `dim`; `dtype` is
// lowered via CInt(). If del is true the receiver is dropped on return.
func(ts *Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogSoftmaxIntOut: as LogSoftmax but writing the result into `out`.
func(ts *Tensor) LogSoftmaxIntOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSoftmaxIntOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Logaddexp wraps lib.AtgLogaddexp against `other`. If del is true the
// receiver is dropped on return.
func(ts *Tensor) Logaddexp(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexp(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Logaddexp2 wraps lib.AtgLogaddexp2 against `other`.
func(ts *Tensor) Logaddexp2(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexp2(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Logaddexp2Out: as Logaddexp2 but writing the result into `out`.
func(ts *Tensor) Logaddexp2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexp2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogaddexpOut: as Logaddexp but writing the result into `out`.
func(ts *Tensor) LogaddexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Logcumsumexp wraps lib.AtgLogcumsumexp over dimension `dim`. If del is
// true the receiver is dropped on return.
func(ts *Tensor) Logcumsumexp(dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogcumsumexp(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LogcumsumexpOut: as Logcumsumexp but writing the result into `out`.
func(ts *Tensor) LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Logdet(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogdet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalAnd(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalAnd_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalAndOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalNot(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalNot(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalNot_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalNot_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalNotOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalOr(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalOr_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalXor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalXor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogicalXorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Logit(eps []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogit(ptr, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Logit_(eps []float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogit_(ptr, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogitBackward(gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogitBackward(ptr, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogitBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func Logspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Logsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgLogsumexp(ptr, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) LogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Lstm wraps lib.AtgLstm and returns three tensors. The three result slots are
// laid out contiguously: ctensorPtr1/2 are derived from ctensorPtr0 by
// pointer-width offsets, and the C call writes all results into that buffer.
// NOTE(review): the buffer comes from C.malloc(0), so the three writes rely on
// allocator slack — confirm the generator should allocate 3*sizeof(Ctensor).
// bool flags are converted to int32 0/1 for the C ABI.
func Lstm(input *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {chx = append(chx, t.ctensor)}
var cparams []lib.Ctensor
for _, t := range params {cparams = append(cparams, t.ctensor)}
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.AtgLstm(ctensorPtr0, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LstmCell wraps lib.AtgLstmCell and returns two tensors, using the same
// contiguous result-slot layout as Lstm.
func LstmCell(input *Tensor, hx []*Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {chx = append(chx, t.ctensor)}
lib.AtgLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LstmData wraps lib.AtgLstmData (packed-sequence variant: data + batchSizes)
// and returns three tensors, same result-slot layout as Lstm.
func LstmData(data *Tensor, batchSizes *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {chx = append(chx, t.ctensor)}
var cparams []lib.Ctensor
for _, t := range params {cparams = append(cparams, t.ctensor)}
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
lib.AtgLstmData(ctensorPtr0, data.ctensor, batchSizes.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lt calls lib.AtgLt comparing ts against a scalar; del drops the receiver after the call.
func(ts *Tensor) Lt(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLt(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Lt_ applies the scalar comparison in place, replacing ts.ctensor.
func(ts *Tensor) Lt_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLt_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LtScalarOut is the out-variant of Lt (scalar comparand).
func(ts *Tensor) LtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LtTensor calls lib.AtgLtTensor comparing ts against another tensor.
func(ts *Tensor) LtTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LtTensor_ applies the tensor comparison in place, replacing ts.ctensor.
func(ts *Tensor) LtTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// LtTensorOut is the out-variant of LtTensor.
func(ts *Tensor) LtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LuSolve calls lib.AtgLuSolve using the LU factorization (lUData, lUPivots).
func(ts *Tensor) LuSolve(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// LuSolveOut is the out-variant of LuSolve.
func(ts *Tensor) LuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LuUnpack wraps lib.AtgLuUnpack and returns three tensors. Result slots are
// contiguous pointer-width offsets from ctensorPtr0 (same layout as Lstm);
// bool flags are converted to int32 0/1 for the C ABI.
func LuUnpack(lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cunpackData := int32(0)
if unpackData { cunpackData = int32(1) }
cunpackPivots := int32(0)
if unpackPivots { cunpackPivots = int32(1) }
lib.AtgLuUnpack(ctensorPtr0, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// LuUnpackOut is the out-variant of LuUnpack, writing into p, l and u.
func LuUnpackOut(p *Tensor, l *Tensor, u *Tensor, lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cunpackData := int32(0)
if unpackData { cunpackData = int32(1) }
cunpackPivots := int32(0)
if unpackPivots { cunpackPivots = int32(1) }
lib.AtgLuUnpackOut(ctensorPtr0, p.ctensor, l.ctensor, u.ctensor, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// MarginRankingLoss wraps lib.AtgMarginRankingLoss over input1/input2/target.
func MarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedFill calls lib.AtgMaskedFill with a scalar fill value; del drops the
// receiver after the call.
func(ts *Tensor) MaskedFill(mask *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedFill_ applies the scalar masked fill in place, replacing ts.ctensor.
func(ts *Tensor) MaskedFill_(mask *Tensor, value *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedFillScalarOut is the out-variant of MaskedFill (scalar value).
func(ts *Tensor) MaskedFillScalarOut(out *Tensor, mask *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFillScalarOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedFillTensor is MaskedFill with a tensor fill value.
func(ts *Tensor) MaskedFillTensor(mask *Tensor, value *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFillTensor(ptr, ts.ctensor, mask.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedFillTensor_ applies the tensor masked fill in place, replacing ts.ctensor.
func(ts *Tensor) MaskedFillTensor_(mask *Tensor, value *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFillTensor_(ptr, ts.ctensor, mask.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedFillTensorOut is the out-variant of MaskedFillTensor.
func(ts *Tensor) MaskedFillTensorOut(out *Tensor, mask *Tensor, value *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFillTensorOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedScatter calls lib.AtgMaskedScatter copying from source where mask applies.
func(ts *Tensor) MaskedScatter(mask *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedScatter_ applies the masked scatter in place, replacing ts.ctensor.
func(ts *Tensor) MaskedScatter_(mask *Tensor, source *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedScatterOut is the out-variant of MaskedScatter.
func(ts *Tensor) MaskedScatterOut(out *Tensor, mask *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedScatterOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedSelect calls lib.AtgMaskedSelect selecting elements of ts where mask applies.
func(ts *Tensor) MaskedSelect(mask *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedSelectBackward wraps lib.AtgMaskedSelectBackward (free function: grad,
// original input and mask are all explicit arguments).
func MaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedSelectBackward(ptr, grad.ctensor, input.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaskedSelectOut is the out-variant of MaskedSelect.
func(ts *Tensor) MaskedSelectOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Matmul calls lib.AtgMatmul on ts and other; del drops the receiver after the call.
func(ts *Tensor) Matmul(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MatmulOut is the out-variant of Matmul.
func(ts *Tensor) MatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MatrixExp calls lib.AtgMatrixExp on the receiver.
func(ts *Tensor) MatrixExp(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixExp(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MatrixExpBackward wraps lib.AtgMatrixExpBackward; note the receiver is
// passed first and grad second to the C call.
func(ts *Tensor) MatrixExpBackward(grad *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixExpBackward(ptr, ts.ctensor, grad.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MatrixH calls lib.AtgMatrixH on the receiver.
func(ts *Tensor) MatrixH(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixH(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MatrixPower calls lib.AtgMatrixPower with exponent n.
func(ts *Tensor) MatrixPower(n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixPower(ptr, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MatrixPowerOut is the out-variant of MatrixPower.
func(ts *Tensor) MatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Max calls lib.AtgMax (full reduction over the receiver).
func(ts *Tensor) Max(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMax(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) MaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) MaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMaxDimMax(ctensorPtr0, max.ctensor, maxValues.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) MaxOther(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxOther(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) MaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) MaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) MaxPool1dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool1dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) MaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) MaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) MaxPool2dBackwardOut(out *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MaxPool2dWithIndices is the auto-generated binding for
// lib.AtgMaxPool2dWithIndices, which returns two tensors (values, indices).
// The C call writes two consecutive tensor handles starting at ctensorPtr0;
// ctensorPtr1 is that address advanced by one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool2dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool2dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxPool2dWithIndicesBackward is the auto-generated binding for
// lib.AtgMaxPool2dWithIndicesBackward. Slices are passed with explicit
// lengths, ceilMode as an int32 flag, and the saved pooling indices tensor
// is forwarded last, matching the C signature.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxPool2dWithIndicesBackwardGradInput is the auto-generated binding for
// lib.AtgMaxPool2dWithIndicesBackwardGradInput (out-variant writing into
// gradInput). Slices are passed with explicit lengths and ceilMode as an
// int32 flag. When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool2dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool2dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MaxPool2dWithIndicesOut is the auto-generated binding for
// lib.AtgMaxPool2dWithIndicesOut (out-variant writing into out/indices),
// returning two tensors. The C call writes two consecutive tensor handles
// starting at ctensorPtr0; ctensorPtr1 is that address plus one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool2dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool2dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxPool3d is the auto-generated binding for lib.AtgMaxPool3d.
// Slices are passed with explicit lengths and ceilMode as an int32 flag;
// the C call's output handle is wrapped in a new *Tensor.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MaxPool3dWithIndices is the auto-generated binding for
// lib.AtgMaxPool3dWithIndices, which returns two tensors (values, indices).
// The C call writes two consecutive tensor handles starting at ctensorPtr0;
// ctensorPtr1 is that address advanced by one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool3dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool3dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxPool3dWithIndicesBackward is the auto-generated binding for
// lib.AtgMaxPool3dWithIndicesBackward. Slices are passed with explicit
// lengths, ceilMode as an int32 flag, and the saved pooling indices tensor
// is forwarded last, matching the C signature.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxPool3dWithIndicesBackwardGradInput is the auto-generated binding for
// lib.AtgMaxPool3dWithIndicesBackwardGradInput (out-variant writing into
// gradInput). Slices are passed with explicit lengths and ceilMode as an
// int32 flag. When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool3dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool3dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MaxPool3dWithIndicesOut is the auto-generated binding for
// lib.AtgMaxPool3dWithIndicesOut (out-variant writing into out/indices),
// returning two tensors. The C call writes two consecutive tensor handles
// starting at ctensorPtr0; ctensorPtr1 is that address plus one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxPool3dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
// Go bool -> C int flag.
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMaxPool3dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxUnaryOut is the auto-generated binding for lib.AtgMaxUnaryOut
// (out-variant writing into `out`). It forwards the handles, checks the
// libtorch error state, and wraps the returned handle in a new *Tensor.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxUnaryOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnaryOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxUnpool2d is the auto-generated binding for lib.AtgMaxUnpool2d.
// outputSize is passed with an explicit length; the C call's output handle
// is wrapped in a new *Tensor.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxUnpool2d(indices *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxUnpool2dOut is the auto-generated binding for lib.AtgMaxUnpool2dOut
// (out-variant writing into `out`). outputSize is passed with an explicit
// length. When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxUnpool3d is the auto-generated binding for lib.AtgMaxUnpool3d.
// Each []int64 argument is passed with its explicit length.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
strideLen := len(stride)
paddingLen := len(padding)
lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, outputSizeLen, stride, strideLen, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaxUnpool3dOut is the auto-generated binding for lib.AtgMaxUnpool3dOut
// (out-variant writing into `out`). Each []int64 argument is passed with
// its explicit length. When del is true the receiver is dropped on return.
func(ts *Tensor) MaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
strideLen := len(stride)
paddingLen := len(padding)
lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, outputSizeLen, stride, strideLen, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Maximum is the auto-generated binding for lib.AtgMaximum
// (presumably ATen's element-wise maximum — confirm against libtorch docs).
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) Maximum(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaximum(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MaximumOut is the auto-generated binding for lib.AtgMaximumOut
// (out-variant writing into `out`).
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MaximumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaximumOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mean is the auto-generated binding for lib.AtgMean. The target dtype is
// passed to the C call as its integer code via dtype.CInt().
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) Mean(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMean(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MeanDim is the auto-generated binding for lib.AtgMeanDim. dim is passed
// with an explicit length, keepdim as an int32 flag, and the target dtype
// as its integer code. When del is true the receiver is dropped on return.
func(ts *Tensor) MeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
// Go bool -> C int flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMeanDim(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MeanOut is the auto-generated binding for lib.AtgMeanOut (out-variant
// writing into `out`). dim is passed with an explicit length, keepdim as an
// int32 flag, and the target dtype as its integer code.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
// Go bool -> C int flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMeanOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Median is the auto-generated binding for lib.AtgMedian. It forwards the
// receiver handle, checks the libtorch error state, and wraps the returned
// handle in a new *Tensor.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) Median(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMedian(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MedianDim is the auto-generated binding for lib.AtgMedianDim, which
// returns two tensors (presumably values and indices — confirm against
// libtorch docs). The C call writes two consecutive tensor handles starting
// at ctensorPtr0; ctensorPtr1 is that address plus one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MedianDimValues is the auto-generated binding for lib.AtgMedianDimValues
// (out-variant writing into values/indices), returning two tensors.
// The C call writes two consecutive tensor handles starting at ctensorPtr0;
// ctensorPtr1 is that address plus one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// MedianOut is the auto-generated binding for lib.AtgMedianOut
// (out-variant writing into `out`).
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MedianOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMedianOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mh is the auto-generated binding for lib.AtgMh. It forwards the receiver
// handle, checks the libtorch error state, and wraps the returned handle
// in a new *Tensor.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) Mh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Min is the auto-generated binding for lib.AtgMin. It forwards the
// receiver handle, checks the libtorch error state, and wraps the returned
// handle in a new *Tensor.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) Min(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MinDim is the auto-generated binding for lib.AtgMinDim, which returns
// two tensors (presumably values and indices — confirm against libtorch
// docs). The C call writes two consecutive tensor handles starting at
// ctensorPtr0; ctensorPtr1 is that address plus one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MinDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMinDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MinDimMin is the auto-generated binding for lib.AtgMinDimMin
// (out-variant writing into min/minIndices), returning two tensors.
// The C call writes two consecutive tensor handles starting at ctensorPtr0;
// ctensorPtr1 is that address plus one pointer width.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for both handles; confirm against libtch.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMinDimMin(ctensorPtr0, min.ctensor, minIndices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// MinOther is the auto-generated binding for lib.AtgMinOther (the
// two-tensor overload of min).
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MinOther(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinOther(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MinOut is the auto-generated binding for lib.AtgMinOut (out-variant
// writing into `out`).
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MinOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Minimum is the auto-generated binding for lib.AtgMinimum
// (presumably ATen's element-wise minimum — confirm against libtorch docs).
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) Minimum(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinimum(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MinimumOut is the auto-generated binding for lib.AtgMinimumOut
// (out-variant writing into `out`).
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MinimumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinimumOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MiopenBatchNorm is the auto-generated package-level binding for
// lib.AtgMiopenBatchNorm, which returns three tensors. The C call writes
// three consecutive tensor handles starting at ctensorPtr0; ctensorPtr1 and
// ctensorPtr2 are derived by successive pointer-width offsets.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for all three handles; confirm.
func MiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag.
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.AtgMiopenBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MiopenBatchNormBackward is the auto-generated package-level binding for
// lib.AtgMiopenBatchNormBackward, which returns three tensors. The C call
// writes three consecutive tensor handles starting at ctensorPtr0;
// ctensorPtr1 and ctensorPtr2 are derived by successive pointer-width offsets.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for all three handles; confirm.
func MiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgMiopenBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MiopenBatchNormBackwardOut is the auto-generated package-level binding
// for lib.AtgMiopenBatchNormBackwardOut (out-variant writing into
// out0/out1/out2), returning three tensors. The C call writes three
// consecutive tensor handles starting at ctensorPtr0.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for all three handles; confirm.
func MiopenBatchNormBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgMiopenBatchNormBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MiopenBatchNormOut is the auto-generated package-level binding for
// lib.AtgMiopenBatchNormOut (out-variant writing into out0/out1/out2),
// returning three tensors. The C call writes three consecutive tensor
// handles starting at ctensorPtr0.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for all three handles; confirm.
func MiopenBatchNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
// Go bool -> C int flag.
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.AtgMiopenBatchNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenConvolution is the auto-generated binding for
// lib.AtgMiopenConvolution. Slices are passed with explicit lengths and
// benchmark/deterministic as int32 flags.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
// Go bools -> C int flags.
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenConvolutionAddRelu is the auto-generated binding for
// lib.AtgMiopenConvolutionAddRelu. alpha is forwarded as a C scalar handle
// and each []int64 is passed with its explicit length.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgMiopenConvolutionAddRelu(ptr, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenConvolutionOut is the auto-generated binding for
// lib.AtgMiopenConvolutionOut (out-variant writing into `out`). Slices are
// passed with explicit lengths and benchmark/deterministic as int32 flags.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
// Go bools -> C int flags.
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
lib.AtgMiopenConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenConvolutionRelu is the auto-generated binding for
// lib.AtgMiopenConvolutionRelu. Each []int64 is passed with its explicit
// length. When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgMiopenConvolutionRelu(ptr, ts.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenConvolutionTranspose is the auto-generated binding for
// lib.AtgMiopenConvolutionTranspose. Slices are passed with explicit
// lengths and benchmark/deterministic as int32 flags.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
strideLen := len(stride)
dilationLen := len(dilation)
// Go bools -> C int flags.
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenConvolutionTransposeOut is the auto-generated binding for
// lib.AtgMiopenConvolutionTransposeOut (out-variant writing into `out`).
// Slices are passed with explicit lengths and benchmark/deterministic as
// int32 flags. When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenConvolutionTransposeOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
strideLen := len(stride)
dilationLen := len(dilation)
// Go bools -> C int flags.
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
lib.AtgMiopenConvolutionTransposeOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenDepthwiseConvolution is the auto-generated binding for
// lib.AtgMiopenDepthwiseConvolution. Slices are passed with explicit
// lengths and benchmark/deterministic as int32 flags.
// When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
// Go bools -> C int flags.
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MiopenDepthwiseConvolutionOut is the auto-generated binding for
// lib.AtgMiopenDepthwiseConvolutionOut (out-variant writing into `out`).
// Slices are passed with explicit lengths and benchmark/deterministic as
// int32 flags. When del is true the receiver is dropped (freed) on return.
func(ts *Tensor) MiopenDepthwiseConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// C-allocated slot the C call fills with the output tensor handle.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
// Go bools -> C int flags.
cbenchmark := int32(0)
if benchmark { cbenchmark = int32(1) }
cdeterministic := int32(0)
if deterministic { cdeterministic = int32(1) }
lib.AtgMiopenDepthwiseConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MiopenRnn is the auto-generated package-level binding for
// lib.AtgMiopenRnn, which returns five tensors. The weight tensors are
// flattened to a []lib.Ctensor handle slice, bools become int32 flags, and
// batchSizes is passed with an explicit length. The C call writes five
// consecutive tensor handles starting at ctensorPtr0; ctensorPtr1..4 are
// derived by successive pointer-width offsets.
// NOTE(review): backing storage is C.malloc(0) (generator-wide pattern) —
// assumes the allocation is usable for all five handles; confirm.
func MiopenRnn(input *Tensor, weight []*Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
// Collect the raw C handles of the weight tensors.
var cweight []lib.Ctensor
for _, t := range weight {cweight = append(cweight, t.ctensor)}
// Go bools -> C int flags.
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
batchSizesLen := len(batchSizes)
lib.AtgMiopenRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, hx.ctensor, cx.ctensor, mode, hiddenSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MiopenRnnOut wraps the C shim `AtgMiopenRnnOut` (out-variant writing into
// out0..out4) and returns its five result tensors.
func MiopenRnnOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, input *Tensor, weight []*Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) {
// Five pointer-sized result slots (was C.malloc(0): the C callee wrote past
// the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(5 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
var cweight []lib.Ctensor
for _, t := range weight {cweight = append(cweight, t.ctensor)}
// Go bools are passed to C as 0/1 int32 flags.
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
batchSizesLen := len(batchSizes)
lib.AtgMiopenRnnOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, input.ctensor, cweight, len(cweight), weightStride0, hx.ctensor, cx.ctensor, mode, hiddenSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mish calls the C shim `AtgMish` on the receiver and wraps the result in a
// new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) Mish(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); the handle is copied out before the deferred free.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMish(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mish_ is the in-place variant: it calls `AtgMish_` and re-points the
// receiver's ctensor at the returned handle.
func(ts *Tensor) Mish_()(err error) {
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); the handle is copied out before the deferred free.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMish_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MishBackward calls the C shim `AtgMishBackward` with gradOutput and the
// receiver, returning the result as a new Tensor. If del is true the receiver
// is dropped after the call.
func(ts *Tensor) MishBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMishBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MishOut calls the C shim `AtgMishOut` (out-variant writing into out),
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MishOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMishOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnAdaptiveAvgPool2d calls the C shim `AtgMkldnnAdaptiveAvgPool2d`,
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
outputSizeLen := len(outputSize)
lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnAdaptiveAvgPool2dBackward calls the C shim
// `AtgMkldnnAdaptiveAvgPool2dBackward`, returning the result as a new Tensor.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) MkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMkldnnAdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnAdaptiveAvgPool2dBackwardOut calls the C shim
// `AtgMkldnnAdaptiveAvgPool2dBackwardOut` (out-variant), returning the result
// as a new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) MkldnnAdaptiveAvgPool2dBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMkldnnAdaptiveAvgPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnAdaptiveAvgPool2dOut calls the C shim `AtgMkldnnAdaptiveAvgPool2dOut`
// (out-variant), returning the result as a new Tensor. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) MkldnnAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
outputSizeLen := len(outputSize)
lib.AtgMkldnnAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnConvolution calls the C shim `AtgMkldnnConvolution`, returning the
// result as a new Tensor. If del is true the receiver is dropped after the
// call.
func(ts *Tensor) MkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnConvolutionOut calls the C shim `AtgMkldnnConvolutionOut`
// (out-variant), returning the result as a new Tensor. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) MkldnnConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.AtgMkldnnConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnLinear calls the C shim `AtgMkldnnLinear`, returning the result as a
// new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) MkldnnLinear(weight *Tensor, bias *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMkldnnLinear(ptr, ts.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnLinearBackwardInput calls the C shim `AtgMkldnnLinearBackwardInput`,
// returning the result as a new Tensor.
func MkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Tensor)(retVal *Tensor, err error) {
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
inputSizeLen := len(inputSize)
lib.AtgMkldnnLinearBackwardInput(ptr, inputSize, inputSizeLen, gradOutput.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnLinearBackwardInputOut calls the C shim
// `AtgMkldnnLinearBackwardInputOut` (out-variant), returning the result as a
// new Tensor.
func MkldnnLinearBackwardInputOut(out *Tensor, inputSize []int64, gradOutput *Tensor, weight *Tensor)(retVal *Tensor, err error) {
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
inputSizeLen := len(inputSize)
lib.AtgMkldnnLinearBackwardInputOut(ptr, out.ctensor, inputSize, inputSizeLen, gradOutput.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MkldnnLinearBackwardWeights calls the C shim
// `AtgMkldnnLinearBackwardWeights` and returns its two result tensors.
func MkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// Two consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbiasDefined := int32(0)
if biasDefined { cbiasDefined = int32(1) }
lib.AtgMkldnnLinearBackwardWeights(ctensorPtr0, gradOutput.ctensor, input.ctensor, weight.ctensor, cbiasDefined)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MkldnnLinearBackwardWeightsOut calls the C shim
// `AtgMkldnnLinearBackwardWeightsOut` (out-variant) and returns its two
// result tensors.
func MkldnnLinearBackwardWeightsOut(out0 *Tensor, out1 *Tensor, gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// Two consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbiasDefined := int32(0)
if biasDefined { cbiasDefined = int32(1) }
lib.AtgMkldnnLinearBackwardWeightsOut(ctensorPtr0, out0.ctensor, out1.ctensor, gradOutput.ctensor, input.ctensor, weight.ctensor, cbiasDefined)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnLinearOut calls the C shim `AtgMkldnnLinearOut` (out-variant),
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MkldnnLinearOut(out *Tensor, weight *Tensor, bias *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMkldnnLinearOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool2d calls the C shim `AtgMkldnnMaxPool2d`, returning the result
// as a new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool2dBackward calls the C shim `AtgMkldnnMaxPool2dBackward`,
// returning the result as a new Tensor.
func MkldnnMaxPool2dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor, err error) {
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool2dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool2dBackwardOut calls the C shim `AtgMkldnnMaxPool2dBackwardOut`
// (out-variant), returning the result as a new Tensor.
func MkldnnMaxPool2dBackwardOut(out *Tensor, gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor, err error) {
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool2dOut calls the C shim `AtgMkldnnMaxPool2dOut` (out-variant),
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MkldnnMaxPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool3d calls the C shim `AtgMkldnnMaxPool3d`, returning the result
// as a new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) MkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool3d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool3dBackward calls the C shim `AtgMkldnnMaxPool3dBackward`,
// returning the result as a new Tensor.
func MkldnnMaxPool3dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor, err error) {
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool3dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool3dBackwardOut calls the C shim `AtgMkldnnMaxPool3dBackwardOut`
// (out-variant), returning the result as a new Tensor.
func MkldnnMaxPool3dBackwardOut(out *Tensor, gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor, err error) {
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool3dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnMaxPool3dOut calls the C shim `AtgMkldnnMaxPool3dOut` (out-variant),
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MkldnnMaxPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgMkldnnMaxPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnReorderConv2dWeight calls the C shim `AtgMkldnnReorderConv2dWeight`,
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, inputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
inputSizeLen := len(inputSize)
lib.AtgMkldnnReorderConv2dWeight(ptr, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, inputSize, inputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnReorderConv2dWeightOut calls the C shim
// `AtgMkldnnReorderConv2dWeightOut` (out-variant), returning the result as a
// new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) MkldnnReorderConv2dWeightOut(out *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, inputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
inputSizeLen := len(inputSize)
lib.AtgMkldnnReorderConv2dWeightOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, inputSize, inputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnReorderConv3dWeight calls the C shim `AtgMkldnnReorderConv3dWeight`,
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.AtgMkldnnReorderConv3dWeight(ptr, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MkldnnReorderConv3dWeightOut calls the C shim
// `AtgMkldnnReorderConv3dWeightOut` (out-variant), returning the result as a
// new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) MkldnnReorderConv3dWeightOut(out *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
paddingLen := len(padding)
strideLen := len(stride)
dilationLen := len(dilation)
lib.AtgMkldnnReorderConv3dWeightOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MkldnnRnnLayer calls the C shim `AtgMkldnnRnnLayer` and returns its four
// result tensors.
func MkldnnRnnLayer(input *Tensor, weight0 *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, hx_ *Tensor, cx_ *Tensor, reverse bool, batchSizes []int64, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, bidirectional bool, batchFirst bool, train bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// Four consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(4 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools are passed to C as 0/1 int32 flags.
creverse := int32(0)
if reverse { creverse = int32(1) }
batchSizesLen := len(batchSizes)
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgMkldnnRnnLayer(ctensorPtr0, input.ctensor, weight0.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, hx_.ctensor, cx_.ctensor, creverse, batchSizes, batchSizesLen, mode, hiddenSize, numLayers, chasBiases, cbidirectional, cbatchFirst, ctrain)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MkldnnRnnLayerBackward calls the C shim `AtgMkldnnRnnLayerBackward` and
// returns its seven result tensors.
func MkldnnRnnLayerBackward(input *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, weight4 *Tensor, hx_ *Tensor, cxTmp *Tensor, output *Tensor, hy_ *Tensor, cy_ *Tensor, gradOutput *Tensor, gradHy *Tensor, gradCy *Tensor, reverse bool, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, train bool, bidirectional bool, batchSizes []int64, batchFirst bool, workspace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor, retVal6 *Tensor, err error) {
// Seven consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(7 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr5 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr4)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr6 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr5)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools are passed to C as 0/1 int32 flags.
creverse := int32(0)
if reverse { creverse = int32(1) }
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
batchSizesLen := len(batchSizes)
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.AtgMkldnnRnnLayerBackward(ctensorPtr0, input.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, weight4.ctensor, hx_.ctensor, cxTmp.ctensor, output.ctensor, hy_.ctensor, cy_.ctensor, gradOutput.ctensor, gradHy.ctensor, gradCy.ctensor, creverse, mode, hiddenSize, numLayers, chasBiases, ctrain, cbidirectional, batchSizes, batchSizesLen, cbatchFirst, workspace.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
retVal5 = &Tensor{ctensor: *ctensorPtr5}
retVal6 = &Tensor{ctensor: *ctensorPtr6}
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MkldnnRnnLayerBackwardOut calls the C shim `AtgMkldnnRnnLayerBackwardOut`
// (out-variant writing into out0..out6) and returns its seven result tensors.
func MkldnnRnnLayerBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, out5 *Tensor, out6 *Tensor, input *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, weight4 *Tensor, hx_ *Tensor, cxTmp *Tensor, output *Tensor, hy_ *Tensor, cy_ *Tensor, gradOutput *Tensor, gradHy *Tensor, gradCy *Tensor, reverse bool, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, train bool, bidirectional bool, batchSizes []int64, batchFirst bool, workspace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor, retVal6 *Tensor, err error) {
// Seven consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(7 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr5 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr4)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr6 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr5)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools are passed to C as 0/1 int32 flags.
creverse := int32(0)
if reverse { creverse = int32(1) }
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
batchSizesLen := len(batchSizes)
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.AtgMkldnnRnnLayerBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, out5.ctensor, out6.ctensor, input.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, weight4.ctensor, hx_.ctensor, cxTmp.ctensor, output.ctensor, hy_.ctensor, cy_.ctensor, gradOutput.ctensor, gradHy.ctensor, gradCy.ctensor, creverse, mode, hiddenSize, numLayers, chasBiases, ctrain, cbidirectional, batchSizes, batchSizesLen, cbatchFirst, workspace.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
retVal4 = &Tensor{ctensor: *ctensorPtr4}
retVal5 = &Tensor{ctensor: *ctensorPtr5}
retVal6 = &Tensor{ctensor: *ctensorPtr6}
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// MkldnnRnnLayerOut calls the C shim `AtgMkldnnRnnLayerOut` (out-variant
// writing into out0..out3) and returns its four result tensors.
func MkldnnRnnLayerOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, input *Tensor, weight0 *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, hx_ *Tensor, cx_ *Tensor, reverse bool, batchSizes []int64, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, bidirectional bool, batchFirst bool, train bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
// Four consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(4 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
// Go bools are passed to C as 0/1 int32 flags.
creverse := int32(0)
if reverse { creverse = int32(1) }
batchSizesLen := len(batchSizes)
chasBiases := int32(0)
if hasBiases { chasBiases = int32(1) }
cbidirectional := int32(0)
if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgMkldnnRnnLayerOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, input.ctensor, weight0.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, hx_.ctensor, cx_.ctensor, creverse, batchSizes, batchSizesLen, mode, hiddenSize, numLayers, chasBiases, cbidirectional, cbatchFirst, ctrain)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
retVal3 = &Tensor{ctensor: *ctensorPtr3}
return retVal0, retVal1, retVal2, retVal3, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mm calls the C shim `AtgMm` with the receiver and mat2, returning the
// result as a new Tensor. If del is true the receiver is dropped after the
// call.
func(ts *Tensor) Mm(mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMm(ptr, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MmOut calls the C shim `AtgMmOut` (out-variant), returning the result as a
// new Tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) MmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Mode calls the C shim `AtgMode` and returns its two result tensors. If del
// is true the receiver is dropped after the call.
func(ts *Tensor) Mode(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Two consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgMode(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// ModeValues calls the C shim `AtgModeValues` (out-variant writing into
// values/indices) and returns its two result tensors. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) ModeValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Two consecutive pointer-sized result slots (was C.malloc(0): the C callee
// wrote past the zero-byte allocation and the buffer leaked); freed on return.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(2 * unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgModeValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// Moveaxis calls the C shim `AtgMoveaxis` with source/destination axis lists,
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) Moveaxis(source []int64, destination []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
sourceLen := len(source)
destinationLen := len(destination)
lib.AtgMoveaxis(ptr, ts.ctensor, source, sourceLen, destination, destinationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MoveaxisInt calls the C shim `AtgMoveaxisInt` (single source/destination
// axis), returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MoveaxisInt(source int64, destination int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMoveaxisInt(ptr, ts.ctensor, source, destination)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Movedim calls the C shim `AtgMovedim` with source/destination dim lists,
// returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) Movedim(source []int64, destination []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
sourceLen := len(source)
destinationLen := len(destination)
lib.AtgMovedim(ptr, ts.ctensor, source, sourceLen, destination, destinationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MovedimInt calls the C shim `AtgMovedimInt` (single source/destination
// dim), returning the result as a new Tensor. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) MovedimInt(source int64, destination int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMovedimInt(ptr, ts.ctensor, source, destination)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MseLoss calls the C shim `AtgMseLoss` with the receiver, target and a
// reduction code, returning the result as a new Tensor. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) MseLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// One pointer-sized result slot (was C.malloc(0), written past by the C
// callee and never freed); freed on return after the handle is copied out.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MseLossBackward wraps the C call lib.AtgMseLossBackward; note the
// receiver is passed as the second C argument, after gradOutput. A new
// *Tensor wrapping the result handle is returned on success. If del is
// true, the receiver is dropped (MustDrop) once this call returns.
func(ts *Tensor) MseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MseLossBackwardGradInput wraps the C call
// lib.AtgMseLossBackwardGradInput, forwarding gradInput and gradOutput
// ahead of the receiver, and returns the result as a new *Tensor. If
// del is true, the receiver is dropped (MustDrop) once this returns.
func(ts *Tensor) MseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMseLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MseLossOut is the out-parameter variant: it wraps the C call
// lib.AtgMseLossOut with an explicit output tensor and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) MseLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Msort wraps the C call lib.AtgMsort on the receiver and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns; err is non-nil when the Torch
// call failed (TorchErr).
func(ts *Tensor) Msort(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMsort(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MsortOut is the out-parameter variant: it wraps the C call
// lib.AtgMsortOut with an explicit output tensor and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) MsortOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMsortOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mt wraps the C call lib.AtgMt on the receiver and returns the result
// as a new *Tensor. If del is true, the receiver is dropped (MustDrop)
// once this call returns; err is non-nil when the Torch call failed
// (TorchErr).
func(ts *Tensor) Mt(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMt(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mul wraps the C call lib.AtgMul with the receiver and other, and
// returns the result as a new *Tensor. If del is true, the receiver is
// dropped (MustDrop) once this call returns; err is non-nil when the
// Torch call failed (TorchErr).
func(ts *Tensor) Mul(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mul_ is the in-place variant: it wraps the C call lib.AtgMul_ and,
// on success, replaces ts.ctensor with the handle the C side wrote.
// The receiver is mutated; only an error is returned.
func(ts *Tensor) Mul_(other *Tensor)(err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMul_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MulOut is the out-parameter variant: it wraps the C call
// lib.AtgMulOut with an explicit output tensor and returns the result
// as a new *Tensor. If del is true, the receiver is dropped (MustDrop)
// once this call returns.
func(ts *Tensor) MulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MulScalar wraps the C call lib.AtgMulScalar with the receiver and a
// scalar operand, and returns the result as a new *Tensor. If del is
// true, the receiver is dropped (MustDrop) once this call returns; err
// is non-nil when the Torch call failed (TorchErr).
func(ts *Tensor) MulScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMulScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MulScalar_ is the in-place variant: it wraps the C call
// lib.AtgMulScalar_ and, on success, replaces ts.ctensor with the
// handle the C side wrote. The receiver is mutated; only an error is
// returned.
func(ts *Tensor) MulScalar_(other *Scalar)(err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMulScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MulScalarOut is the out-parameter variant: it wraps the C call
// lib.AtgMulScalarOut with an explicit output tensor and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) MulScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMulScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultiMarginLossBackward wraps the C call
// lib.AtgMultiMarginLossBackward; the receiver is passed second, after
// gradOutput. A new *Tensor wrapping the result handle is returned on
// success. If del is true, the receiver is dropped once this returns.
func(ts *Tensor) MultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultiMarginLossBackwardGradInput wraps the C call
// lib.AtgMultiMarginLossBackwardGradInput, forwarding gradInput and
// gradOutput ahead of the receiver, and returns the result as a new
// *Tensor. If del is true, the receiver is dropped once this returns.
func(ts *Tensor) MultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultilabelMarginLoss wraps the C call lib.AtgMultilabelMarginLoss
// with the receiver, target and reduction code, and returns the result
// as a new *Tensor. If del is true, the receiver is dropped (MustDrop)
// once this call returns.
func(ts *Tensor) MultilabelMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultilabelMarginLossBackward wraps the C call
// lib.AtgMultilabelMarginLossBackward; the receiver is passed second,
// after gradOutput. A new *Tensor wrapping the result handle is
// returned on success. If del is true, the receiver is dropped once
// this call returns.
func(ts *Tensor) MultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultilabelMarginLossBackwardGradInput wraps the C call
// lib.AtgMultilabelMarginLossBackwardGradInput, forwarding gradInput
// and gradOutput ahead of the receiver, and returns the result as a
// new *Tensor. If del is true, the receiver is dropped once this
// call returns.
func(ts *Tensor) MultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultilabelMarginLossOut is the out-parameter variant: it wraps the C
// call lib.AtgMultilabelMarginLossOut with an explicit output tensor
// and returns the result as a new *Tensor. If del is true, the
// receiver is dropped (MustDrop) once this call returns.
func(ts *Tensor) MultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Multinomial wraps the C call lib.AtgMultinomial; the Go bool
// replacement is encoded as a C int32 (1 = true). The result is
// returned as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) Multinomial(numSamples int64, replacement bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
creplacement := int32(0)
if replacement { creplacement = int32(1) }
lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultinomialOut is the out-parameter variant: it wraps the C call
// lib.AtgMultinomialOut with an explicit output tensor; the Go bool
// replacement is encoded as a C int32 (1 = true). If del is true, the
// receiver is dropped (MustDrop) once this call returns.
func(ts *Tensor) MultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
creplacement := int32(0)
if replacement { creplacement = int32(1) }
lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Multiply wraps the C call lib.AtgMultiply with the receiver and
// other, and returns the result as a new *Tensor. If del is true, the
// receiver is dropped (MustDrop) once this call returns; err is
// non-nil when the Torch call failed (TorchErr).
func(ts *Tensor) Multiply(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiply(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Multiply_ is the in-place variant: it wraps the C call
// lib.AtgMultiply_ and, on success, replaces ts.ctensor with the
// handle the C side wrote. The receiver is mutated; only an error is
// returned.
func(ts *Tensor) Multiply_(other *Tensor)(err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiply_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MultiplyOut is the out-parameter variant: it wraps the C call
// lib.AtgMultiplyOut with an explicit output tensor and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) MultiplyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiplyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultiplyScalar wraps the C call lib.AtgMultiplyScalar with the
// receiver and a scalar operand, and returns the result as a new
// *Tensor. If del is true, the receiver is dropped (MustDrop) once
// this call returns.
func(ts *Tensor) MultiplyScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiplyScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MultiplyScalar_ is the in-place variant: it wraps the C call
// lib.AtgMultiplyScalar_ and, on success, replaces ts.ctensor with the
// handle the C side wrote. The receiver is mutated; only an error is
// returned.
func(ts *Tensor) MultiplyScalar_(other *Scalar)(err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiplyScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Mv wraps the C call lib.AtgMv with the receiver and vec, and returns
// the result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns; err is non-nil when the Torch
// call failed (TorchErr).
func(ts *Tensor) Mv(vec *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMv(ptr, ts.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// MvOut is the out-parameter variant: it wraps the C call lib.AtgMvOut
// with an explicit output tensor and returns the result as a new
// *Tensor. If del is true, the receiver is dropped (MustDrop) once
// this call returns.
func(ts *Tensor) MvOut(out *Tensor, vec *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mvlgamma wraps the C call lib.AtgMvlgamma with the receiver and p,
// and returns the result as a new *Tensor. If del is true, the
// receiver is dropped (MustDrop) once this call returns; err is
// non-nil when the Torch call failed (TorchErr).
func(ts *Tensor) Mvlgamma(p int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvlgamma(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Mvlgamma_ is the in-place variant: it wraps the C call
// lib.AtgMvlgamma_ and, on success, replaces ts.ctensor with the
// handle the C side wrote. The receiver is mutated; only an error is
// returned.
func(ts *Tensor) Mvlgamma_(p int64)(err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvlgamma_(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// MvlgammaOut is the out-parameter variant: it wraps the C call
// lib.AtgMvlgammaOut with an explicit output tensor and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) MvlgammaOut(out *Tensor, p int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvlgammaOut(ptr, out.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NanToNum wraps the C call lib.AtgNanToNum. Each optional float64
// (nan, posinf, neginf) is passed as a (value, null-flag) pair: an
// empty slice sets the null flag, otherwise the first element is used.
// If del is true, the receiver is dropped (MustDrop) once this call
// returns.
func(ts *Tensor) NanToNum(nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnanVal float64 = 0.0
var cnanNull int = 1
if len(nan) > 0 {
cnanVal = nan[0]
cnanNull = 0
}
var cposinfVal float64 = 0.0
var cposinfNull int = 1
if len(posinf) > 0 {
cposinfVal = posinf[0]
cposinfNull = 0
}
var cneginfVal float64 = 0.0
var cneginfNull int = 1
if len(neginf) > 0 {
cneginfVal = neginf[0]
cneginfNull = 0
}
lib.AtgNanToNum(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NanToNum_ is the in-place variant of NanToNum: each optional float64
// (nan, posinf, neginf) is passed as a (value, null-flag) pair — an
// empty slice sets the null flag. On success ts.ctensor is replaced
// with the handle the C side wrote; only an error is returned.
func(ts *Tensor) NanToNum_(nan []float64, posinf []float64, neginf []float64)(err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnanVal float64 = 0.0
var cnanNull int = 1
if len(nan) > 0 {
cnanVal = nan[0]
cnanNull = 0
}
var cposinfVal float64 = 0.0
var cposinfNull int = 1
if len(posinf) > 0 {
cposinfVal = posinf[0]
cposinfNull = 0
}
var cneginfVal float64 = 0.0
var cneginfNull int = 1
if len(neginf) > 0 {
cneginfVal = neginf[0]
cneginfNull = 0
}
lib.AtgNanToNum_(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NanToNumOut is the out-parameter variant of NanToNum: the result is
// written through an explicit output tensor. Each optional float64
// (nan, posinf, neginf) is passed as a (value, null-flag) pair — an
// empty slice sets the null flag. If del is true, the receiver is
// dropped (MustDrop) once this call returns.
func(ts *Tensor) NanToNumOut(out *Tensor, nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnanVal float64 = 0.0
var cnanNull int = 1
if len(nan) > 0 {
cnanVal = nan[0]
cnanNull = 0
}
var cposinfVal float64 = 0.0
var cposinfNull int = 1
if len(posinf) > 0 {
cposinfVal = posinf[0]
cposinfNull = 0
}
var cneginfVal float64 = 0.0
var cneginfNull int = 1
if len(neginf) > 0 {
cneginfVal = neginf[0]
cneginfNull = 0
}
lib.AtgNanToNumOut(ptr, out.ctensor, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Nanmean wraps the C call lib.AtgNanmean, forwarding the dim list
// with its length, keepdim as a C int32 (1 = true), and the dtype's C
// code. If del is true, the receiver is dropped (MustDrop) once this
// call returns.
func(ts *Tensor) Nanmean(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanmean(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NanmeanOut is the out-parameter variant of Nanmean: the result is
// written through an explicit output tensor; keepdim is encoded as a
// C int32 (1 = true). If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) NanmeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanmeanOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Nanmedian wraps the C call lib.AtgNanmedian on the receiver and
// returns the result as a new *Tensor. If del is true, the receiver is
// dropped (MustDrop) once this call returns; err is non-nil when the
// Torch call failed (TorchErr).
func(ts *Tensor) Nanmedian(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNanmedian(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NanmedianDim wraps the C call lib.AtgNanmedianDim, which produces
// two tensors (values and indices, by position). If del is true, the
// receiver is dropped (MustDrop) once this call returns.
func(ts *Tensor) NanmedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// The C call writes two handles contiguously starting at ctensorPtr0;
// ctensorPtr1 is one pointer-width past it. NOTE(review): the buffer
// comes from C.malloc(0), so this relies on the allocator's minimum
// chunk holding both handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanmedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NanmedianDimValues is the out-parameter variant of NanmedianDim: it
// wraps lib.AtgNanmedianDimValues with explicit values/indices output
// tensors and returns two result tensors. If del is true, the receiver
// is dropped (MustDrop) once this call returns.
func(ts *Tensor) NanmedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
// The C call writes two handles contiguously starting at ctensorPtr0;
// ctensorPtr1 is one pointer-width past it. NOTE(review): the buffer
// comes from C.malloc(0), so this relies on the allocator's minimum
// chunk holding both handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanmedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// NanmedianOut is the out-parameter variant: it wraps the C call
// lib.AtgNanmedianOut with an explicit output tensor and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) NanmedianOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNanmedianOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Nanquantile wraps the C call lib.AtgNanquantile with a tensor q. The
// optional dim is passed as a (value, null-flag) pair: an empty slice
// sets the null flag, otherwise the first element is used. keepdim is
// encoded as a C int32 (1 = true). If del is true, the receiver is
// dropped (MustDrop) once this call returns.
func(ts *Tensor) Nanquantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanquantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NanquantileOut is the out-parameter variant of Nanquantile: the
// result is written through an explicit output tensor. The optional
// dim is passed as a (value, null-flag) pair — an empty slice sets the
// null flag. If del is true, the receiver is dropped (MustDrop) once
// this call returns.
func(ts *Tensor) NanquantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanquantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NanquantileScalar wraps the C call lib.AtgNanquantileScalar with a
// plain float64 q. The optional dim is passed as a (value, null-flag)
// pair — an empty slice sets the null flag. If del is true, the
// receiver is dropped (MustDrop) once this call returns.
func(ts *Tensor) NanquantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanquantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NanquantileScalarOut is the out-parameter variant of
// NanquantileScalar: the result is written through an explicit output
// tensor. The optional dim is passed as a (value, null-flag) pair — an
// empty slice sets the null flag. If del is true, the receiver is
// dropped (MustDrop) once this call returns.
func(ts *Tensor) NanquantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNanquantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Nansum wraps the C call lib.AtgNansum, forwarding the dim list with
// its length, keepdim as a C int32 (1 = true), and the dtype's C code.
// If del is true, the receiver is dropped (MustDrop) once this call
// returns.
func(ts *Tensor) Nansum(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNansum(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NansumOut is the out-parameter variant of Nansum: the result is
// written through an explicit output tensor; keepdim is encoded as a
// C int32 (1 = true). If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) NansumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNansumOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Narrow wraps the C call lib.AtgNarrow with the receiver, dim, start
// and length, and returns the result as a new *Tensor. If del is true,
// the receiver is dropped (MustDrop) once this call returns; err is
// non-nil when the Torch call failed (TorchErr).
func(ts *Tensor) Narrow(dim int64, start int64, length int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrow(ptr, ts.ctensor, dim, start, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NarrowCopy wraps the C call lib.AtgNarrowCopy with the receiver,
// dim, start and length, and returns the result as a new *Tensor. If
// del is true, the receiver is dropped (MustDrop) once this call
// returns.
func(ts *Tensor) NarrowCopy(dim int64, start int64, length int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NarrowCopyOut is the out-parameter variant: it wraps the C call
// lib.AtgNarrowCopyOut with an explicit output tensor and returns the
// result as a new *Tensor. If del is true, the receiver is dropped
// (MustDrop) once this call returns.
func(ts *Tensor) NarrowCopyOut(out *Tensor, dim int64, start int64, length int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrowCopyOut(ptr, out.ctensor, ts.ctensor, dim, start, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NarrowTensor wraps the C call lib.AtgNarrowTensor, taking the start
// position as a tensor rather than an int64, and returns the result as
// a new *Tensor. If del is true, the receiver is dropped (MustDrop)
// once this call returns.
func(ts *Tensor) NarrowTensor(dim int64, start *Tensor, length int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrowTensor(ptr, ts.ctensor, dim, start.ctensor, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeBatchNorm wraps the C call lib.AtgNativeBatchNorm, which
// produces three tensors. training is encoded as a C int32 (1 = true);
// momentum and eps are forwarded verbatim.
func NativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// The C call writes three handles contiguously starting at
// ctensorPtr0; ctensorPtr1/2 are one and two pointer-widths past it.
// NOTE(review): the buffer comes from C.malloc(0), so this relies on
// the allocator's minimum chunk holding all three handles —
// generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.AtgNativeBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeBatchNormOut is the out-parameter variant of NativeBatchNorm:
// it wraps lib.AtgNativeBatchNormOut with explicit out/saveMean/
// saveInvstd output tensors and returns three result tensors. training
// is encoded as a C int32 (1 = true).
func NativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// The C call writes three handles contiguously starting at
// ctensorPtr0. NOTE(review): the buffer comes from C.malloc(0), so
// this relies on the allocator's minimum chunk holding all three
// handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training { ctraining = int32(1) }
lib.AtgNativeBatchNormOut(ctensorPtr0, out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// NativeChannelShuffle wraps the C call lib.AtgNativeChannelShuffle
// with the receiver and groups, and returns the result as a new
// *Tensor. If del is true, the receiver is dropped (MustDrop) once
// this call returns.
func(ts *Tensor) NativeChannelShuffle(groups int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeChannelShuffle(ptr, ts.ctensor, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeDropout wraps the C call lib.AtgNativeDropout, which produces
// two tensors. train is encoded as a C int32 (1 = true); p is
// forwarded verbatim.
func NativeDropout(input *Tensor, p float64, train bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// The C call writes two handles contiguously starting at ctensorPtr0;
// ctensorPtr1 is one pointer-width past it. NOTE(review): the buffer
// comes from C.malloc(0), so this relies on the allocator's minimum
// chunk holding both handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgNativeDropout(ctensorPtr0, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// NativeDropoutBackward wraps the C call lib.AtgNativeDropoutBackward
// with gradOutput, mask and scale, and returns the result as a new
// *Tensor; err is non-nil when the Torch call failed (TorchErr).
func NativeDropoutBackward(gradOutput *Tensor, mask *Tensor, scale float64)(retVal *Tensor, err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeDropoutBackward(ptr, gradOutput.ctensor, mask.ctensor, scale)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NativeDropoutBackwardOut is the out-parameter variant of
// NativeDropoutBackward: it wraps lib.AtgNativeDropoutBackwardOut with
// an explicit output tensor and returns the result as a new *Tensor.
func NativeDropoutBackwardOut(out *Tensor, gradOutput *Tensor, mask *Tensor, scale float64)(retVal *Tensor, err error) {
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeDropoutBackwardOut(ptr, out.ctensor, gradOutput.ctensor, mask.ctensor, scale)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeDropoutOut is the out-parameter variant of NativeDropout: it
// wraps lib.AtgNativeDropoutOut with explicit out0/out1 output tensors
// and returns two result tensors. train is encoded as a C int32
// (1 = true).
func NativeDropoutOut(out0 *Tensor, out1 *Tensor, input *Tensor, p float64, train bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
// The C call writes two handles contiguously starting at ctensorPtr0;
// ctensorPtr1 is one pointer-width past it. NOTE(review): the buffer
// comes from C.malloc(0), so this relies on the allocator's minimum
// chunk holding both handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctrain := int32(0)
if train { ctrain = int32(1) }
lib.AtgNativeDropoutOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeGroupNorm wraps the C call lib.AtgNativeGroupNorm, which
// produces three tensors; all scalar parameters (n, c, hxW, group,
// eps) are forwarded verbatim.
func NativeGroupNorm(input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// The C call writes three handles contiguously starting at
// ctensorPtr0. NOTE(review): the buffer comes from C.malloc(0), so
// this relies on the allocator's minimum chunk holding all three
// handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgNativeGroupNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, n, c, hxW, group, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeGroupNormOut is the out-parameter variant of NativeGroupNorm:
// it wraps lib.AtgNativeGroupNormOut with explicit out0/out1/out2
// output tensors and returns three result tensors.
func NativeGroupNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// The C call writes three handles contiguously starting at
// ctensorPtr0. NOTE(review): the buffer comes from C.malloc(0), so
// this relies on the allocator's minimum chunk holding all three
// handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgNativeGroupNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, weight.ctensor, bias.ctensor, n, c, hxW, group, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeLayerNorm wraps the C call lib.AtgNativeLayerNorm, which
// produces three tensors; the normalizedShape list is forwarded with
// its length.
func NativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// The C call writes three handles contiguously starting at
// ctensorPtr0. NOTE(review): the buffer comes from C.malloc(0), so
// this relies on the allocator's minimum chunk holding all three
// handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
normalizedShapeLen := len(normalizedShape)
lib.AtgNativeLayerNorm(ctensorPtr0, input.ctensor, normalizedShape, normalizedShapeLen, weight.ctensor, bias.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// NativeLayerNormOut is the out-parameter variant of NativeLayerNorm:
// it wraps lib.AtgNativeLayerNormOut with explicit out0/out1/out2
// output tensors and returns three result tensors.
func NativeLayerNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
// The C call writes three handles contiguously starting at
// ctensorPtr0. NOTE(review): the buffer comes from C.malloc(0), so
// this relies on the allocator's minimum chunk holding all three
// handles — generator-wide pattern, worth checking.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
normalizedShapeLen := len(normalizedShape)
lib.AtgNativeLayerNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, normalizedShape, normalizedShapeLen, weight.ctensor, bias.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// NativeNorm wraps the C call lib.AtgNativeNorm on the receiver and
// returns the result as a new *Tensor. If del is true, the receiver is
// dropped (MustDrop) once this call returns; err is non-nil when the
// Torch call failed (TorchErr).
func(ts *Tensor) NativeNorm(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Slot the C side writes the result tensor handle into.
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeNorm(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NativeNormOut wraps lib.AtgNativeNormOut, the out-variant that writes into
// `out`'s storage; the returned Tensor wraps the handle the C layer produces.
// del=true drops ts on return.
func(ts *Tensor) NativeNormOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeNormOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NativeNormScalaroptDimDtype wraps lib.AtgNativeNormScalaroptDimDtype with
// norm order p, reduction dims, keepdim flag (bool → 0/1 int32 for C) and
// output dtype. del=true drops ts on return.
func(ts *Tensor) NativeNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNativeNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NativeNormScalaroptDimDtypeOut is the out-variant of
// NativeNormScalaroptDimDtype: lib.AtgNativeNormScalaroptDimDtypeOut writes
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NativeNormScalaroptDimDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNativeNormScalaroptDimDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ne wraps lib.AtgNe (not-equal against a scalar) and returns the result as
// a new Tensor. del=true drops ts on return.
func(ts *Tensor) Ne(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNe(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ne_ applies lib.AtgNe_ (in-place not-equal against a scalar); on success
// ts.ctensor is replaced with the handle the C layer wrote.
func(ts *Tensor) Ne_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNe_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NeScalarOut wraps lib.AtgNeScalarOut, writing the scalar comparison result
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NeTensor wraps lib.AtgNeTensor (not-equal against another tensor) and
// returns the result as a new Tensor. del=true drops ts on return.
func(ts *Tensor) NeTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NeTensor_ applies lib.AtgNeTensor_ in place; on success ts.ctensor is
// replaced with the handle the C layer wrote.
func(ts *Tensor) NeTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NeTensorOut wraps lib.AtgNeTensorOut, writing the tensor comparison result
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Neg wraps lib.AtgNeg (the libtorch `neg` op) and returns the result as a
// new Tensor. del=true drops ts on return.
func(ts *Tensor) Neg(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeg(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Neg_ applies lib.AtgNeg_ in place; on success ts.ctensor is replaced with
// the handle the C layer wrote.
func(ts *Tensor) Neg_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeg_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NegOut wraps lib.AtgNegOut, writing the negation into `out`'s storage.
// del=true drops ts on return.
func(ts *Tensor) NegOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Negative wraps lib.AtgNegative (libtorch's alias of `neg`) and returns the
// result as a new Tensor. del=true drops ts on return.
func(ts *Tensor) Negative(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegative(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Negative_ applies lib.AtgNegative_ in place; on success ts.ctensor is
// replaced with the handle the C layer wrote.
func(ts *Tensor) Negative_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegative_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NegativeOut wraps lib.AtgNegativeOut, writing into `out`'s storage.
// del=true drops ts on return.
func(ts *Tensor) NegativeOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegativeOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NestedToPaddedTensor wraps lib.AtgNestedToPaddedTensor with a fill value
// `padding` and a target outputSize. del=true drops ts on return.
func(ts *Tensor) NestedToPaddedTensor(padding float64, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgNestedToPaddedTensor(ptr, ts.ctensor, padding, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewEmpty wraps lib.AtgNewEmpty: allocates a new uninitialized tensor of
// the given size with explicit dtype/device options. del=true drops ts on
// return.
func(ts *Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewEmpty(ptr, ts.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewEmptyOut wraps lib.AtgNewEmptyOut, the out-variant of NewEmpty that
// writes into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NewEmptyOut(out *Tensor, size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewEmptyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewEmptyStrided wraps lib.AtgNewEmptyStrided: like NewEmpty but with an
// explicit stride layout. del=true drops ts on return.
func(ts *Tensor) NewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.AtgNewEmptyStrided(ptr, ts.ctensor, size, sizeLen, stride, strideLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewEmptyStridedOut wraps lib.AtgNewEmptyStridedOut, the out-variant of
// NewEmptyStrided writing into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NewEmptyStridedOut(out *Tensor, size []int64, stride []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.AtgNewEmptyStridedOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, stride, strideLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewFull wraps lib.AtgNewFull: allocates a new tensor of the given size
// filled with the scalar fillValue, with explicit dtype/device. del=true
// drops ts on return.
func(ts *Tensor) NewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewFull(ptr, ts.ctensor, size, sizeLen, fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewFullOut wraps lib.AtgNewFullOut, the out-variant of NewFull writing
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NewFullOut(out *Tensor, size []int64, fillValue *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewFullOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, fillValue.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewOnes wraps lib.AtgNewOnes: allocates a new ones-filled tensor of the
// given size with explicit dtype/device. del=true drops ts on return.
func(ts *Tensor) NewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewOnes(ptr, ts.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewOnesOut wraps lib.AtgNewOnesOut, the out-variant of NewOnes writing
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NewOnesOut(out *Tensor, size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewOnesOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewZeros wraps lib.AtgNewZeros: allocates a new zero-filled tensor of the
// given size with explicit dtype/device. del=true drops ts on return.
func(ts *Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewZeros(ptr, ts.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NewZerosOut wraps lib.AtgNewZerosOut, the out-variant of NewZeros writing
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NewZerosOut(out *Tensor, size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgNewZerosOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Nextafter wraps lib.AtgNextafter (libtorch `nextafter` op with `other`)
// and returns the result as a new Tensor. del=true drops ts on return.
func(ts *Tensor) Nextafter(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNextafter(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Nextafter_ applies lib.AtgNextafter_ in place; on success ts.ctensor is
// replaced with the handle the C layer wrote.
func(ts *Tensor) Nextafter_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNextafter_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NextafterOut wraps lib.AtgNextafterOut, writing into `out`'s storage.
// del=true drops ts on return.
func(ts *Tensor) NextafterOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNextafterOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLoss wraps lib.AtgNllLoss (negative log likelihood loss) with target,
// optional class weight tensor, reduction mode and ignoreIndex. del=true
// drops ts on return.
func(ts *Tensor) NllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLoss2d wraps lib.AtgNllLoss2d, the 2-D variant of NllLoss. del=true
// drops ts on return.
func(ts *Tensor) NllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLoss2dBackward wraps lib.AtgNllLoss2dBackward, the backward pass of
// NllLoss2d (gradOutput and totalWeight come from the forward pass).
// del=true drops ts on return.
func(ts *Tensor) NllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLoss2dBackwardGradInput wraps lib.AtgNllLoss2dBackwardGradInput, the
// out-variant of NllLoss2dBackward writing into `gradInput`'s storage.
// del=true drops ts on return.
func(ts *Tensor) NllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLoss2dOut wraps lib.AtgNllLoss2dOut, the out-variant of NllLoss2d
// writing into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLossBackward wraps lib.AtgNllLossBackward, the backward pass of NllLoss
// (gradOutput and totalWeight come from the forward pass). del=true drops ts
// on return.
func(ts *Tensor) NllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLossBackwardGradInput wraps lib.AtgNllLossBackwardGradInput, the
// out-variant of NllLossBackward writing into `gradInput`'s storage.
// del=true drops ts on return.
func(ts *Tensor) NllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLossNd wraps lib.AtgNllLossNd, the N-dimensional variant of NllLoss.
// del=true drops ts on return.
func(ts *Tensor) NllLossNd(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossNd(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NllLossOut wraps lib.AtgNllLossOut, the out-variant of NllLoss writing
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Nonzero wraps lib.AtgNonzero and returns the result as a new Tensor.
// del=true drops ts on return.
func(ts *Tensor) Nonzero(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNonzero(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NonzeroOut wraps lib.AtgNonzeroOut, writing into `out`'s storage.
// del=true drops ts on return.
func(ts *Tensor) NonzeroOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Norm wraps lib.AtgNorm (default, no-argument norm variant) and returns the
// result as a new Tensor. del=true drops ts on return.
func(ts *Tensor) Norm(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNorm(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormDtypeOut wraps lib.AtgNormDtypeOut: norm of order p over dims with
// keepdim (bool → 0/1 int32 for C) and an output dtype, written into `out`'s
// storage. del=true drops ts on return.
func(ts *Tensor) NormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNormDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormExceptDim wraps lib.AtgNormExceptDim on tensor v with the given pow
// and dim, returning the result as a new Tensor.
func NormExceptDim(v *Tensor, pow int64, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormOut wraps lib.AtgNormOut: norm of order p over dims with keepdim,
// written into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormScalarOut wraps lib.AtgNormScalarOut, the out-variant of the default
// Norm writing into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NormScalarOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormScalarOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormScalaroptDim wraps lib.AtgNormScalaroptDim: norm of order p reduced
// over dims with keepdim. del=true drops ts on return.
func(ts *Tensor) NormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNormScalaroptDim(ptr, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormScalaroptDimDtype wraps lib.AtgNormScalaroptDimDtype: like
// NormScalaroptDim with an explicit output dtype. del=true drops ts on
// return.
func(ts *Tensor) NormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormScalaroptDtype wraps lib.AtgNormScalaroptDtype: norm of order p with
// an explicit output dtype. del=true drops ts on return.
func(ts *Tensor) NormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormScalaroptDtype(ptr, ts.ctensor, p.cscalar, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NormScalaroptDtypeOut wraps lib.AtgNormScalaroptDtypeOut, the out-variant
// of NormScalaroptDtype writing into `out`'s storage. del=true drops ts on
// return.
func(ts *Tensor) NormScalaroptDtypeOut(out *Tensor, p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormScalaroptDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Normal_ applies lib.AtgNormal_ in place with the given mean and std; on
// success ts.ctensor is replaced with the handle the C layer wrote.
func(ts *Tensor) Normal_(mean float64, std float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormal_(ptr, ts.ctensor, mean, std)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NormalFunctional wraps lib.AtgNormalFunctional, the non-mutating variant
// of Normal_ that returns a new Tensor. del=true drops ts on return.
func(ts *Tensor) NormalFunctional(mean float64, std float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormalFunctional(ptr, ts.ctensor, mean, std)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NotEqual wraps lib.AtgNotEqual (not-equal against a scalar) and returns
// the result as a new Tensor. del=true drops ts on return.
func(ts *Tensor) NotEqual(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqual(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NotEqual_ applies lib.AtgNotEqual_ in place; on success ts.ctensor is
// replaced with the handle the C layer wrote.
func(ts *Tensor) NotEqual_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqual_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NotEqualScalarOut wraps lib.AtgNotEqualScalarOut, writing into `out`'s
// storage. del=true drops ts on return.
func(ts *Tensor) NotEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NotEqualTensor wraps lib.AtgNotEqualTensor (not-equal against another
// tensor) and returns the result as a new Tensor. del=true drops ts on
// return.
func(ts *Tensor) NotEqualTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NotEqualTensor_ applies lib.AtgNotEqualTensor_ in place; on success
// ts.ctensor is replaced with the handle the C layer wrote.
func(ts *Tensor) NotEqualTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// NotEqualTensorOut wraps lib.AtgNotEqualTensorOut, writing into `out`'s
// storage. del=true drops ts on return.
func(ts *Tensor) NotEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NuclearNorm wraps lib.AtgNuclearNorm with keepdim (bool → 0/1 int32 for C)
// and returns the result as a new Tensor. del=true drops ts on return.
func(ts *Tensor) NuclearNorm(keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NuclearNormDim wraps lib.AtgNuclearNormDim: nuclear norm over explicit
// dims with keepdim. del=true drops ts on return.
func(ts *Tensor) NuclearNormDim(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNuclearNormDim(ptr, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NuclearNormDimOut wraps lib.AtgNuclearNormDimOut, the out-variant of
// NuclearNormDim writing into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNuclearNormDimOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NuclearNormOut wraps lib.AtgNuclearNormOut, the out-variant of NuclearNorm
// writing into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) NuclearNormOut(out *Tensor, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// NumpyT wraps lib.AtgNumpyT (the `numpy_T` transpose accessor) and returns
// the result as a new Tensor. del=true drops ts on return.
func(ts *Tensor) NumpyT(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNumpyT(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// OneHot wraps lib.AtgOneHot with numClasses and returns the result as a new
// Tensor. del=true drops ts on return.
func(ts *Tensor) OneHot(numClasses int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOneHot(ptr, ts.ctensor, numClasses)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ones wraps lib.AtgOnes: creates a ones-filled tensor of the given size
// with explicit dtype/device options.
func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgOnes(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// OnesLike wraps lib.AtgOnesLike: a ones-filled tensor shaped like ts.
// del=true drops ts on return.
func(ts *Tensor) OnesLike(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOnesLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// OnesLikeOut wraps lib.AtgOnesLikeOut, the out-variant of OnesLike writing
// into `out`'s storage. del=true drops ts on return.
func(ts *Tensor) OnesLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOnesLikeOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// OnesOut wraps lib.AtgOnesOut, the out-variant of Ones writing into `out`'s
// storage.
func OnesOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgOnesOut(ptr, out.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Orgqr wraps lib.AtgOrgqr (LAPACK-style orgqr with elementary reflectors in
// input2) and returns the result as a new Tensor. del=true drops ts on
// return.
func(ts *Tensor) Orgqr(input2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// OrgqrOut wraps lib.AtgOrgqrOut, the out-variant of Orgqr writing into
// `out`'s storage. del=true drops ts on return.
func(ts *Tensor) OrgqrOut(out *Tensor, input2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Ormqr wraps lib.AtgOrmqr (LAPACK-style ormqr) with left/transpose flags
// converted to 0/1 int32 for C. del=true drops ts on return.
func(ts *Tensor) Ormqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left { cleft = int32(1) }
ctranspose := int32(0)
if transpose { ctranspose = int32(1) }
lib.AtgOrmqr(ptr, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// OrmqrOut wraps lib.AtgOrmqrOut, the out-variant of Ormqr writing into
// `out`'s storage. del=true drops ts on return.
func(ts *Tensor) OrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left { cleft = int32(1) }
ctranspose := int32(0)
if transpose { ctranspose = int32(1) }
lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Outer wraps lib.AtgOuter (outer product with vec2) and returns the result
// as a new Tensor. del=true drops ts on return.
func(ts *Tensor) Outer(vec2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOuter(ptr, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// OuterOut wraps lib.AtgOuterOut, the out-variant of Outer writing into
// `out`'s storage. del=true drops ts on return.
func(ts *Tensor) OuterOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOuterOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `int64`:
// --------------------------
// OutputNr returns the int64 produced by lib.AtgOutputNr for ts (no tensor
// is allocated). del=true drops ts on return.
func(ts *Tensor) OutputNr(del bool)(retVal int64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgOutputNr(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Pad wraps lib.AtgPad with the pad spec, a mode string, and an optional
// fill value: an empty `value` slice means "null" (cvalueNull=1), otherwise
// value[0] is passed (extra elements are ignored). del=true drops ts on
// return.
func(ts *Tensor) Pad(pad []int64, mode string, value []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
padLen := len(pad)
var cvalueVal float64 = 0.0
var cvalueNull int = 1
if len(value) > 0 {
cvalueVal = value[0]
cvalueNull = 0
}
lib.AtgPad(ptr, ts.ctensor, pad, padLen, mode, cvalueVal, cvalueNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PadSequence is the auto-generated wrapper for lib.AtgPadSequence
// (ATen `pad_sequence`). The Go tensor slice is flattened to a []lib.Ctensor
// handle list before crossing the cgo boundary.
func PadSequence(sequences []*Tensor, batchFirst bool, paddingValue float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var csequences []lib.Ctensor
for _, t := range sequences {csequences = append(csequences, t.ctensor)}
cbatchFirst := int32(0)
if batchFirst { cbatchFirst = int32(1) }
lib.AtgPadSequence(ptr, csequences, len(csequences), cbatchFirst, paddingValue)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PairwiseDistance is the auto-generated wrapper for lib.AtgPairwiseDistance
// (ATen `pairwise_distance`) over the two input tensors x1 and x2.
func PairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Pdist is the auto-generated wrapper for lib.AtgPdist (ATen `pdist`) with
// norm degree p. del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Pdist(p float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPdist(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Permute is the auto-generated wrapper for lib.AtgPermute (ATen `permute`);
// `dims` is passed with its explicit length, per the C binding convention.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Permute(dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimsLen := len(dims)
lib.AtgPermute(ptr, ts.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PermuteCopy is the auto-generated wrapper for lib.AtgPermuteCopy
// (ATen `permute_copy`). del=true drops the receiver when the call returns.
func(ts *Tensor) PermuteCopy(dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimsLen := len(dims)
lib.AtgPermuteCopy(ptr, ts.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PermuteCopyOut is the auto-generated wrapper for lib.AtgPermuteCopyOut;
// `out` is forwarded as the destination tensor. del=true drops the receiver.
func(ts *Tensor) PermuteCopyOut(out *Tensor, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimsLen := len(dims)
lib.AtgPermuteCopyOut(ptr, out.ctensor, ts.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PinMemory is the auto-generated wrapper for lib.AtgPinMemory; the target
// device is passed as its C int code. del=true drops the receiver.
func(ts *Tensor) PinMemory(device gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPinMemory(ptr, ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Pinverse is the auto-generated wrapper for lib.AtgPinverse (ATen `pinverse`)
// with cutoff rcond. del=true drops the receiver when the call returns.
func(ts *Tensor) Pinverse(rcond float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPinverse(ptr, ts.ctensor, rcond)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PixelShuffle is the auto-generated wrapper for lib.AtgPixelShuffle
// (ATen `pixel_shuffle`). del=true drops the receiver when the call returns.
func(ts *Tensor) PixelShuffle(upscaleFactor int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PixelShuffleOut is the auto-generated wrapper for lib.AtgPixelShuffleOut;
// `out` is forwarded as the destination tensor. del=true drops the receiver.
func(ts *Tensor) PixelShuffleOut(out *Tensor, upscaleFactor int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPixelShuffleOut(ptr, out.ctensor, ts.ctensor, upscaleFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PixelUnshuffle is the auto-generated wrapper for lib.AtgPixelUnshuffle
// (ATen `pixel_unshuffle`). del=true drops the receiver when the call returns.
func(ts *Tensor) PixelUnshuffle(downscaleFactor int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPixelUnshuffle(ptr, ts.ctensor, downscaleFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PixelUnshuffleOut is the auto-generated wrapper for lib.AtgPixelUnshuffleOut;
// `out` is forwarded as the destination tensor. del=true drops the receiver.
func(ts *Tensor) PixelUnshuffleOut(out *Tensor, downscaleFactor int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPixelUnshuffleOut(ptr, out.ctensor, ts.ctensor, downscaleFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Poisson is the auto-generated wrapper for lib.AtgPoisson (ATen `poisson`).
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Poisson(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPoisson(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PoissonNllLoss is the auto-generated wrapper for lib.AtgPoissonNllLoss
// (ATen `poisson_nll_loss`); the two bool flags are lowered to 0/1 int32.
func PoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
clogInput := int32(0)
if logInput { clogInput = int32(1) }
cfull := int32(0)
if full { cfull = int32(1) }
lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PoissonOut is the auto-generated wrapper for lib.AtgPoissonOut; `out` is
// forwarded as the destination tensor. del=true drops the receiver.
func(ts *Tensor) PoissonOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPoissonOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Polar is the auto-generated wrapper for lib.AtgPolar (ATen `polar`) taking
// magnitude (`abs`) and phase (`angle`) tensors.
func Polar(abs *Tensor, angle *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolar(ptr, abs.ctensor, angle.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PolarOut is the auto-generated wrapper for lib.AtgPolarOut; `out` is
// forwarded as the destination tensor of the out= overload.
func PolarOut(out *Tensor, abs *Tensor, angle *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolarOut(ptr, out.ctensor, abs.ctensor, angle.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Polygamma is the auto-generated wrapper for lib.AtgPolygamma
// (ATen `polygamma`); note the C binding takes n before the tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Polygamma(n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolygamma(ptr, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Polygamma_ is the auto-generated in-place wrapper for lib.AtgPolygamma_;
// on success the receiver's ctensor is replaced with the returned handle.
func(ts *Tensor) Polygamma_(n int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolygamma_(ptr, ts.ctensor, n)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// PolygammaOut is the auto-generated wrapper for lib.AtgPolygammaOut; `out`
// is forwarded as the destination tensor. del=true drops the receiver.
func(ts *Tensor) PolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Positive is the auto-generated wrapper for lib.AtgPositive (ATen `positive`).
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Positive(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPositive(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Pow is the auto-generated wrapper for lib.AtgPow with a tensor exponent.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Pow(exponent *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPow(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Pow_ is the auto-generated in-place wrapper for lib.AtgPow_ with a scalar
// exponent; on success the receiver's ctensor is replaced with the result.
func(ts *Tensor) Pow_(exponent *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// PowScalar is the auto-generated wrapper for lib.AtgPowScalar: a scalar base
// raised to a tensor exponent (ATen `pow.Scalar`).
func PowScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowScalar(ptr, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PowScalarOut is the auto-generated wrapper for lib.AtgPowScalarOut; `out`
// is forwarded as the destination tensor of the out= overload.
func PowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PowTensor_ is the auto-generated in-place wrapper for lib.AtgPowTensor_
// with a tensor exponent; on success ts.ctensor is replaced with the result.
func(ts *Tensor) PowTensor_(exponent *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensor_(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// PowTensorScalar is the auto-generated wrapper for lib.AtgPowTensorScalar:
// tensor base, scalar exponent. del=true drops the receiver.
func(ts *Tensor) PowTensorScalar(exponent *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensorScalar(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PowTensorScalarOut is the auto-generated wrapper for
// lib.AtgPowTensorScalarOut; `out` is forwarded as the destination tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) PowTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// PowTensorTensorOut is the auto-generated wrapper for
// lib.AtgPowTensorTensorOut; `out` is forwarded as the destination tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) PowTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Prelu is the auto-generated wrapper for lib.AtgPrelu (ATen `prelu`) with a
// learnable `weight` tensor. del=true drops the receiver.
func(ts *Tensor) Prelu(weight *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Prod is the auto-generated wrapper for lib.AtgProd; the target dtype is
// passed as its C int code. del=true drops the receiver.
func(ts *Tensor) Prod(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgProd(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ProdDimInt is the auto-generated wrapper for lib.AtgProdDimInt
// (ATen `prod.dim_int`). del=true drops the receiver when the call returns.
func(ts *Tensor) ProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgProdDimInt(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ProdIntOut is the auto-generated wrapper for lib.AtgProdIntOut; `out` is
// forwarded as the destination tensor. del=true drops the receiver.
func(ts *Tensor) ProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgProdIntOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ProdOut is the auto-generated wrapper for lib.AtgProdOut; `out` is
// forwarded as the destination tensor. del=true drops the receiver.
func(ts *Tensor) ProdOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgProdOut(ptr, out.ctensor, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Put is the auto-generated wrapper for lib.AtgPut (ATen `put`); `accumulate`
// is lowered to 0/1 int32. del=true drops the receiver when the call returns.
func(ts *Tensor) Put(index *Tensor, source *Tensor, accumulate bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
lib.AtgPut(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Put_ is the auto-generated in-place wrapper for lib.AtgPut_; on success
// the receiver's ctensor is replaced with the returned handle.
func(ts *Tensor) Put_(index *Tensor, source *Tensor, accumulate bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// PutOut is the auto-generated wrapper for lib.AtgPutOut; `out` is forwarded
// as the destination tensor. del=true drops the receiver when the call returns.
func(ts *Tensor) PutOut(out *Tensor, index *Tensor, source *Tensor, accumulate bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
caccumulate := int32(0)
if accumulate { caccumulate = int32(1) }
lib.AtgPutOut(ptr, out.ctensor, ts.ctensor, index.ctensor, source.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `int64`:
// --------------------------

// QPerChannelAxis is the auto-generated wrapper for lib.AtgQPerChannelAxis;
// it returns a plain int64 property of the (quantized) tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QPerChannelAxis(del bool)(retVal int64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgQPerChannelAxis(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QPerChannelScales is the auto-generated wrapper for
// lib.AtgQPerChannelScales. del=true drops the receiver when the call returns.
func(ts *Tensor) QPerChannelScales(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQPerChannelScales(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QPerChannelScalesOut is the auto-generated wrapper for
// lib.AtgQPerChannelScalesOut; `out` is forwarded as the destination tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QPerChannelScalesOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQPerChannelScalesOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QPerChannelZeroPoints is the auto-generated wrapper for
// lib.AtgQPerChannelZeroPoints. del=true drops the receiver.
func(ts *Tensor) QPerChannelZeroPoints(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QPerChannelZeroPointsOut is the auto-generated wrapper for
// lib.AtgQPerChannelZeroPointsOut; `out` is forwarded as the destination
// tensor. del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QPerChannelZeroPointsOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQPerChannelZeroPointsOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `double `:
// --------------------------

// QScale is the auto-generated wrapper for lib.AtgQScale; it returns a plain
// float64 property of the (quantized) tensor. del=true drops the receiver.
func(ts *Tensor) QScale(del bool)(retVal float64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgQScale(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `int64`:
// --------------------------

// QZeroPoint is the auto-generated wrapper for lib.AtgQZeroPoint; it returns
// a plain int64 property of the (quantized) tensor. del=true drops the receiver.
func(ts *Tensor) QZeroPoint(del bool)(retVal int64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgQZeroPoint(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// Qr is the auto-generated wrapper for lib.AtgQr, returning two tensors.
// The second result slot is derived by pointer arithmetic: the C side is
// expected to write both handles contiguously starting at ctensorPtr0.
// NOTE(review): ctensorPtr1 points one word past a zero-byte C.malloc — this
// relies on the generator/libtch allocation convention; verify against the
// allocator before changing anything here.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Qr(some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some { csome = int32(1) }
lib.AtgQr(ctensorPtr0, ts.ctensor, csome)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// QrQ is the auto-generated wrapper for lib.AtgQrQ (the out= variant of Qr,
// writing into q and r). Both result handles are written contiguously
// starting at ctensorPtr0; see the allocation note on Qr.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QrQ(q *Tensor, r *Tensor, some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some { csome = int32(1) }
lib.AtgQrQ(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, csome)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// Quantile is the auto-generated wrapper for lib.AtgQuantile with a tensor q.
// `dim` encodes an optional int64: an empty slice is passed as null
// (cdimNull=1), otherwise dim[0] is used. del=true drops the receiver.
func(ts *Tensor) Quantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgQuantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantileOut is the auto-generated wrapper for lib.AtgQuantileOut; `out` is
// forwarded as the destination tensor. `dim` encodes an optional int64
// (empty slice = null). del=true drops the receiver when the call returns.
func(ts *Tensor) QuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgQuantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantileScalar is the auto-generated wrapper for lib.AtgQuantileScalar
// with a float64 q. `dim` encodes an optional int64 (empty slice = null).
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgQuantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantileScalarOut is the auto-generated wrapper for
// lib.AtgQuantileScalarOut; `out` is forwarded as the destination tensor and
// `dim` encodes an optional int64 (empty slice = null). del=true drops the
// receiver tensor when the call returns.
func(ts *Tensor) QuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgQuantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizePerChannel is the auto-generated wrapper for
// lib.AtgQuantizePerChannel (ATen `quantize_per_channel`).
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizePerChannelOut is the auto-generated wrapper for
// lib.AtgQuantizePerChannelOut; `out` is forwarded as the destination tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizePerChannelOut(out *Tensor, scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizePerChannelOut(ptr, out.ctensor, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizePerTensor is the auto-generated wrapper for
// lib.AtgQuantizePerTensor (ATen `quantize_per_tensor`).
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizePerTensorDynamic is the auto-generated wrapper for
// lib.AtgQuantizePerTensorDynamic (ATen `quantize_per_tensor_dynamic`).
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizePerTensorDynamic(dtype gotch.DType, reduceRange bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
creduceRange := int32(0)
if reduceRange { creduceRange = int32(1) }
lib.AtgQuantizePerTensorDynamic(ptr, ts.ctensor, dtype.CInt(), creduceRange)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizePerTensorDynamicOut is the auto-generated wrapper for
// lib.AtgQuantizePerTensorDynamicOut; `out` is forwarded as the destination
// tensor. del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizePerTensorDynamicOut(out *Tensor, dtype gotch.DType, reduceRange bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
creduceRange := int32(0)
if reduceRange { creduceRange = int32(1) }
lib.AtgQuantizePerTensorDynamicOut(ptr, out.ctensor, ts.ctensor, dtype.CInt(), creduceRange)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizePerTensorTensorQparams is the auto-generated wrapper for
// lib.AtgQuantizePerTensorTensorQparams, taking scale and zero point as
// tensors. del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizePerTensorTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedBatchNorm is the auto-generated wrapper for
// lib.AtgQuantizedBatchNorm (ATen `quantized_batch_norm`).
func QuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedBatchNormOut is the auto-generated wrapper for
// lib.AtgQuantizedBatchNormOut; `out` is forwarded as the destination tensor.
func QuantizedBatchNormOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedBatchNormOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedGruCell is the auto-generated wrapper for lib.AtgQuantizedGruCell
// (ATen `quantized_gru_cell`); all tensor/scalar handles are forwarded
// positionally to the C binding.
func QuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// QuantizedLstmCell is the auto-generated wrapper for
// lib.AtgQuantizedLstmCell, returning two tensors. `hx` is flattened to a
// []lib.Ctensor handle list; the second result slot is derived by pointer
// arithmetic from the first (see the allocation note on Qr).
func QuantizedLstmCell(input *Tensor, hx []*Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {chx = append(chx, t.ctensor)}
lib.AtgQuantizedLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedMaxPool1d is the auto-generated wrapper for
// lib.AtgQuantizedMaxPool1d; each int64 slice is passed with its explicit
// length. del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgQuantizedMaxPool1d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedMaxPool1dOut is the auto-generated wrapper for
// lib.AtgQuantizedMaxPool1dOut; `out` is forwarded as the destination tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizedMaxPool1dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgQuantizedMaxPool1dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedMaxPool2d is the auto-generated wrapper for
// lib.AtgQuantizedMaxPool2d; each int64 slice is passed with its explicit
// length. del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedMaxPool2dOut is the auto-generated wrapper for
// lib.AtgQuantizedMaxPool2dOut; `out` is forwarded as the destination tensor.
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) QuantizedMaxPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
cceilMode := int32(0)
if ceilMode { cceilMode = int32(1) }
lib.AtgQuantizedMaxPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedRnnReluCell is the auto-generated wrapper for
// lib.AtgQuantizedRnnReluCell (ATen `quantized_rnn_relu_cell`).
func QuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// QuantizedRnnTanhCell is the auto-generated wrapper for
// lib.AtgQuantizedRnnTanhCell (ATen `quantized_rnn_tanh_cell`).
func QuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rad2deg is the auto-generated wrapper for lib.AtgRad2deg (ATen `rad2deg`).
// del=true drops the receiver tensor when the call returns.
func(ts *Tensor) Rad2deg(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRad2deg(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Rad2deg_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRad2deg_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Rad2degOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRad2degOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Rand wraps lib.AtgRand: creates a tensor with the given size, element kind
// and device (kind/device are passed to C as their CInt codes).
func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRand(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandLike wraps lib.AtgRandLike; the new tensor is derived from ts.
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) RandLike(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandLikeOut wraps lib.AtgRandLikeOut, writing into `out`.
func(ts *Tensor) RandLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandLikeOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandOut wraps lib.AtgRandOut, writing into `out`.
func RandOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRandOut(ptr, out.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Randint wraps lib.AtgRandint: integer tensor with the given size/kind/device;
// `high` is the (exclusive, per ATen convention — confirm) upper bound.
func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRandint(ptr, high, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandintLike wraps lib.AtgRandintLike.
func(ts *Tensor) RandintLike(high int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLike(ptr, ts.ctensor, high)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandintLikeLowDtype wraps lib.AtgRandintLikeLowDtype (low/high bounds variant).
func(ts *Tensor) RandintLikeLowDtype(low int64, high int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLikeLowDtype(ptr, ts.ctensor, low, high)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandintLikeLowDtypeOut wraps lib.AtgRandintLikeLowDtypeOut, writing into `out`.
func(ts *Tensor) RandintLikeLowDtypeOut(out *Tensor, low int64, high int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLikeLowDtypeOut(ptr, out.ctensor, ts.ctensor, low, high)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandintLikeOut wraps lib.AtgRandintLikeOut, writing into `out`.
func(ts *Tensor) RandintLikeOut(out *Tensor, high int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLikeOut(ptr, out.ctensor, ts.ctensor, high)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandintLow wraps lib.AtgRandintLow (explicit low/high bounds).
func RandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRandintLow(ptr, low, high, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandintLowOut wraps lib.AtgRandintLowOut, writing into `out`.
func RandintLowOut(out *Tensor, low int64, high int64, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRandintLowOut(ptr, out.ctensor, low, high, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandintOut wraps lib.AtgRandintOut, writing into `out`.
func RandintOut(out *Tensor, high int64, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRandintOut(ptr, out.ctensor, high, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Randn wraps lib.AtgRandn: tensor with the given size/kind/device.
func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRandn(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandnLike wraps lib.AtgRandnLike.
func(ts *Tensor) RandnLike(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandnLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandnLikeOut wraps lib.AtgRandnLikeOut, writing into `out`.
func(ts *Tensor) RandnLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandnLikeOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandnOut wraps lib.AtgRandnOut, writing into `out`.
func RandnOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgRandnOut(ptr, out.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Random wraps lib.AtgRandom; returns a new Tensor derived from ts.
// If del is true, ts is dropped when this call returns.
func(ts *Tensor) Random(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandom(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Random_ wraps lib.AtgRandom_ (in-place); on success ts.ctensor is replaced.
func(ts *Tensor) Random_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandom_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// RandomFrom wraps lib.AtgRandomFrom. `to` is an optional scalar passed as a
// 0- or 1-element slice: empty slice means "null" (ctoNull=1), otherwise to[0]
// is used — the generator's convention for nullable int64 arguments.
func(ts *Tensor) RandomFrom(from int64, to []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctoVal int64 = 0
var ctoNull int = 1
if len(to) > 0 {
ctoVal = to[0]
ctoNull = 0
}
lib.AtgRandomFrom(ptr, ts.ctensor, from, ctoVal, ctoNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandomFrom_ wraps lib.AtgRandomFrom_ (in-place variant of RandomFrom).
func(ts *Tensor) RandomFrom_(from int64, to []int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctoVal int64 = 0
var ctoNull int = 1
if len(to) > 0 {
ctoVal = to[0]
ctoNull = 0
}
lib.AtgRandomFrom_(ptr, ts.ctensor, from, ctoVal, ctoNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// RandomFromOut wraps lib.AtgRandomFromOut, writing into `out`.
func(ts *Tensor) RandomFromOut(out *Tensor, from int64, to []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctoVal int64 = 0
var ctoNull int = 1
if len(to) > 0 {
ctoVal = to[0]
ctoNull = 0
}
lib.AtgRandomFromOut(ptr, out.ctensor, ts.ctensor, from, ctoVal, ctoNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandomOut wraps lib.AtgRandomOut, writing into `out`.
func(ts *Tensor) RandomOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandomOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandomTo wraps lib.AtgRandomTo (upper bound `to`).
func(ts *Tensor) RandomTo(to int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandomTo(ptr, ts.ctensor, to)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandomTo_ wraps lib.AtgRandomTo_ (in-place); on success ts.ctensor is replaced.
func(ts *Tensor) RandomTo_(to int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandomTo_(ptr, ts.ctensor, to)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// RandomToOut wraps lib.AtgRandomToOut, writing into `out`.
func(ts *Tensor) RandomToOut(out *Tensor, to int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandomToOut(ptr, out.ctensor, ts.ctensor, to)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Randperm wraps lib.AtgRandperm: length-n tensor with the given kind/device.
func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RandpermOut wraps lib.AtgRandpermOut, writing into `out`.
func RandpermOut(out *Tensor, n int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandpermOut(ptr, out.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Range wraps lib.AtgRange: tensor spanning [start, end] scalars with the
// given kind/device.
func Range(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRange(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RangeOut wraps lib.AtgRangeOut, writing into `out`.
func RangeOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RangeOut_ wraps lib.AtgRangeOut_. NOTE(review): unlike other *_ wrappers in
// this file it returns a new *Tensor rather than mutating a receiver — this
// mirrors the underscore variant of the C API, not the usual in-place pattern.
func RangeOut_(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRangeOut_(ptr, out.ctensor, start.cscalar, end.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RangeStep wraps lib.AtgRangeStep.
func RangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRangeStep(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Ravel wraps lib.AtgRavel; returns a new Tensor (ts dropped first if del).
func(ts *Tensor) Ravel(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRavel(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Real wraps lib.AtgReal; returns a new Tensor (ts dropped first if del).
func(ts *Tensor) Real(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Reciprocal wraps lib.AtgReciprocal; returns a new Tensor.
func(ts *Tensor) Reciprocal(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReciprocal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Reciprocal_ wraps lib.AtgReciprocal_ (in-place); on success ts.ctensor is replaced.
func(ts *Tensor) Reciprocal_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReciprocal_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// ReciprocalOut wraps lib.AtgReciprocalOut, writing into `out`.
func(ts *Tensor) ReciprocalOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad1d wraps lib.AtgReflectionPad1d; `padding` is passed to C as a
// pointer+length pair (same convention for every padding wrapper below).
func(ts *Tensor) ReflectionPad1d(padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad1dBackward wraps lib.AtgReflectionPad1dBackward (gradient w.r.t. input).
func(ts *Tensor) ReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad1dBackwardGradInput wraps lib.AtgReflectionPad1dBackwardGradInput,
// writing into the caller-supplied gradInput tensor.
func(ts *Tensor) ReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad1dOut wraps lib.AtgReflectionPad1dOut, writing into `out`.
func(ts *Tensor) ReflectionPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad2d wraps lib.AtgReflectionPad2d.
func(ts *Tensor) ReflectionPad2d(padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad2dBackward wraps lib.AtgReflectionPad2dBackward.
func(ts *Tensor) ReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad2dBackwardGradInput wraps lib.AtgReflectionPad2dBackwardGradInput.
func(ts *Tensor) ReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad2dOut wraps lib.AtgReflectionPad2dOut, writing into `out`.
func(ts *Tensor) ReflectionPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad3d wraps lib.AtgReflectionPad3d.
func(ts *Tensor) ReflectionPad3d(padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad3d(ptr, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad3dBackward wraps lib.AtgReflectionPad3dBackward.
func(ts *Tensor) ReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad3dBackwardGradInput wraps lib.AtgReflectionPad3dBackwardGradInput.
func(ts *Tensor) ReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReflectionPad3dOut wraps lib.AtgReflectionPad3dOut, writing into `out`.
func(ts *Tensor) ReflectionPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReflectionPad3dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Relu wraps lib.AtgRelu; returns a new Tensor (ts dropped first if del).
func(ts *Tensor) Relu(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Relu6 wraps lib.AtgRelu6; returns a new Tensor (ts dropped first if del).
func(ts *Tensor) Relu6(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu6(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Relu6_ wraps lib.AtgRelu6_ (in-place); on success ts.ctensor is replaced.
func(ts *Tensor) Relu6_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu6_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// Relu_ wraps lib.AtgRelu_ (in-place); on success ts.ctensor is replaced.
func(ts *Tensor) Relu_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// ReluOut wraps lib.AtgReluOut, writing into `out`.
func(ts *Tensor) ReluOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Remainder wraps lib.AtgRemainder (scalar divisor); returns a new Tensor.
func(ts *Tensor) Remainder(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainder(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Remainder_ wraps lib.AtgRemainder_ (in-place, scalar divisor).
func(ts *Tensor) Remainder_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// RemainderScalarOut wraps lib.AtgRemainderScalarOut, writing into `out`.
func(ts *Tensor) RemainderScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RemainderScalarTensor wraps lib.AtgRemainderScalarTensor (scalar dividend,
// tensor divisor).
func RemainderScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RemainderScalarTensorOut wraps lib.AtgRemainderScalarTensorOut, writing into `out`.
func RemainderScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RemainderTensor wraps lib.AtgRemainderTensor (tensor divisor).
func(ts *Tensor) RemainderTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RemainderTensor_ wraps lib.AtgRemainderTensor_ (in-place, tensor divisor).
func(ts *Tensor) RemainderTensor_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// RemainderTensorOut wraps lib.AtgRemainderTensorOut, writing into `out`.
func(ts *Tensor) RemainderTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Renorm wraps lib.AtgRenorm (p-norm, dim, maxnorm passed straight through).
func(ts *Tensor) Renorm(p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Renorm_ wraps lib.AtgRenorm_ (in-place); on success ts.ctensor is replaced.
func(ts *Tensor) Renorm_(p *Scalar, dim int64, maxnorm *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// RenormOut wraps lib.AtgRenormOut, writing into `out`.
func(ts *Tensor) RenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// Repeat wraps lib.AtgRepeat; `repeats` is passed as pointer+length.
func(ts *Tensor) Repeat(repeats []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
repeatsLen := len(repeats)
lib.AtgRepeat(ptr, ts.ctensor, repeats, repeatsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RepeatInterleave wraps lib.AtgRepeatInterleave. `outputSize` is an optional
// scalar passed as a 0- or 1-element slice (empty => null flag set).
func RepeatInterleave(repeats *Tensor, outputSize []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var coutputSizeVal int64 = 0
var coutputSizeNull int = 1
if len(outputSize) > 0 {
coutputSizeVal = outputSize[0]
coutputSizeNull = 0
}
lib.AtgRepeatInterleave(ptr, repeats.ctensor, coutputSizeVal, coutputSizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RepeatInterleaveSelfInt wraps lib.AtgRepeatInterleaveSelfInt; `dim` and
// `outputSize` are optional scalars (0/1-element slice convention).
func(ts *Tensor) RepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
var coutputSizeVal int64 = 0
var coutputSizeNull int = 1
if len(outputSize) > 0 {
coutputSizeVal = outputSize[0]
coutputSizeNull = 0
}
lib.AtgRepeatInterleaveSelfInt(ptr, ts.ctensor, repeats, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RepeatInterleaveSelfTensor wraps lib.AtgRepeatInterleaveSelfTensor
// (per-element repeat counts given as a tensor).
func(ts *Tensor) RepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
var coutputSizeVal int64 = 0
var coutputSizeNull int = 1
if len(outputSize) > 0 {
coutputSizeVal = outputSize[0]
coutputSizeNull = 0
}
lib.AtgRepeatInterleaveSelfTensor(ptr, ts.ctensor, repeats.ctensor, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RepeatInterleaveTensorOut wraps lib.AtgRepeatInterleaveTensorOut, writing into `out`.
func RepeatInterleaveTensorOut(out *Tensor, repeats *Tensor, outputSize []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var coutputSizeVal int64 = 0
var coutputSizeNull int = 1
if len(outputSize) > 0 {
coutputSizeVal = outputSize[0]
coutputSizeNull = 0
}
lib.AtgRepeatInterleaveTensorOut(ptr, out.ctensor, repeats.ctensor, coutputSizeVal, coutputSizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// RepeatOut wraps lib.AtgRepeatOut, writing into `out`.
func(ts *Tensor) RepeatOut(out *Tensor, repeats []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
repeatsLen := len(repeats)
lib.AtgRepeatOut(ptr, out.ctensor, ts.ctensor, repeats, repeatsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad1d wraps lib.AtgReplicationPad1d; `padding` is passed to C as
// a pointer+length pair (same convention for the whole ReplicationPad family).
func(ts *Tensor) ReplicationPad1d(padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad1dBackward wraps lib.AtgReplicationPad1dBackward.
func(ts *Tensor) ReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad1dBackwardGradInput wraps lib.AtgReplicationPad1dBackwardGradInput,
// writing into the caller-supplied gradInput tensor.
func(ts *Tensor) ReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad1dOut wraps lib.AtgReplicationPad1dOut, writing into `out`.
func(ts *Tensor) ReplicationPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad2d wraps lib.AtgReplicationPad2d.
func(ts *Tensor) ReplicationPad2d(padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad2dBackward wraps lib.AtgReplicationPad2dBackward.
func(ts *Tensor) ReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad2dBackwardGradInput wraps lib.AtgReplicationPad2dBackwardGradInput.
func(ts *Tensor) ReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad2dOut wraps lib.AtgReplicationPad2dOut, writing into `out`.
func(ts *Tensor) ReplicationPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// ReplicationPad3d wraps lib.AtgReplicationPad3d.
func(ts *Tensor) ReplicationPad3d(padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ReplicationPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
paddingLen := len(padding)
lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RequiresGrad_ sets the receiver's requires_grad flag in place. The bool is
// converted to a C int32 (0/1) for the cgo call; on success the receiver's
// ctensor handle is replaced with the handle returned by the C side.
func(ts *Tensor) RequiresGrad_(requiresGrad bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
crequiresGrad := int32(0)
 if requiresGrad { crequiresGrad = int32(1) }
lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// Reshape returns a tensor with the same data as the receiver but the given
// shape. If del is true the receiver is dropped after the call.
func(ts *Tensor) Reshape(shape []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 shapeLen := len(shape)
lib.AtgReshape(ptr, ts.ctensor, shape, shapeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ReshapeAs returns the receiver reshaped to the same shape as other.
func(ts *Tensor) ReshapeAs(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Resize returns a tensor resized to the given size (non-mutating variant).
func(ts *Tensor) Resize(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 sizeLen := len(size)
lib.AtgResize(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Resize_ resizes the receiver in place; on success the receiver's ctensor
// handle is replaced with the one returned by the C side.
func(ts *Tensor) Resize_(size []int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgResize_(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// ResizeAs returns a tensor resized to the same size as theTemplate.
func(ts *Tensor) ResizeAs(theTemplate *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgResizeAs(ptr, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ResizeAs_ resizes the receiver in place to the same size as theTemplate.
func(ts *Tensor) ResizeAs_(theTemplate *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// ResizeAsOut is the out-variant of ResizeAs: the result is written into out.
func(ts *Tensor) ResizeAsOut(out *Tensor, theTemplate *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgResizeAsOut(ptr, out.ctensor, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ResizeAsSparse returns a sparse tensor resized like theTemplate.
func(ts *Tensor) ResizeAsSparse(theTemplate *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgResizeAsSparse(ptr, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ResizeAsSparse_ is the in-place variant of ResizeAsSparse.
func(ts *Tensor) ResizeAsSparse_(theTemplate *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgResizeAsSparse_(ptr, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// ResizeAsSparseOut is the out-variant of ResizeAsSparse.
func(ts *Tensor) ResizeAsSparseOut(out *Tensor, theTemplate *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgResizeAsSparseOut(ptr, out.ctensor, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ResizeOut is the out-variant of Resize: the result is written into out.
func(ts *Tensor) ResizeOut(out *Tensor, size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 sizeLen := len(size)
lib.AtgResizeOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ResolveConj materializes the conjugation of a tensor with the conjugate
// bit set; otherwise it returns the tensor as-is (behavior delegated to
// the C side).
func(ts *Tensor) ResolveConj(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgResolveConj(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ResolveNeg materializes the negation of a tensor with the negative bit
// set; otherwise it returns the tensor as-is (behavior delegated to the
// C side).
func(ts *Tensor) ResolveNeg(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgResolveNeg(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `bool`:
// --------------------------

// RetainsGrad reports whether the receiver retains its gradient. Unlike the
// tensor-returning wrappers, the C call here returns the bool directly and
// no result pointer is allocated.
func(ts *Tensor) RetainsGrad(del bool)(retVal bool, err error) {
if del { defer ts.MustDrop() }
 
 retVal = lib.AtgRetainsGrad(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// RnnRelu runs a multi-layer RNN with ReLU non-linearity over input, with
// initial hidden state hx and flat parameter list params. It returns the
// output sequence (retVal0) and the final hidden state (retVal1).
//
// NOTE(review): ctensorPtr1 is computed as ctensorPtr0 plus the size of one
// pointer — presumably the C side writes two consecutive Ctensor handles
// starting at ctensorPtr0. The backing allocation comes from C.malloc(0),
// so this pattern relies on the libtch convention; confirm there before
// modifying. The allocation is also never freed here.
func RnnRelu(input *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))

// Collect the raw C handles of the parameter tensors for the cgo call.
var cparams []lib.Ctensor
 for _, t := range params {cparams = append(cparams, t.ctensor)}
// Go bools are passed to C as int32 0/1 flags.
chasBiases := int32(0)
 if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
 if train { ctrain = int32(1) }
cbidirectional := int32(0)
 if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
 if batchFirst { cbatchFirst = int32(1) }
lib.AtgRnnRelu(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}

return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// RnnReluCell computes a single RNN cell step with ReLU non-linearity from
// input and hidden state hx, using input/hidden weights wIh, wHh and biases
// bIh, bHh.
func RnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// RnnReluData is the packed-sequence variant of RnnRelu: data/batchSizes
// describe a packed input. Returns the packed output and the final hidden
// state. Same pointer-offset multi-return pattern as RnnRelu.
func RnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))

var cparams []lib.Ctensor
 for _, t := range params {cparams = append(cparams, t.ctensor)}
chasBiases := int32(0)
 if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
 if train { ctrain = int32(1) }
cbidirectional := int32(0)
 if bidirectional { cbidirectional = int32(1) }
lib.AtgRnnReluData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}

return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// RnnTanh runs a multi-layer RNN with tanh non-linearity. Same signature
// and multi-return pattern as RnnRelu.
func RnnTanh(input *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))

var cparams []lib.Ctensor
 for _, t := range params {cparams = append(cparams, t.ctensor)}
chasBiases := int32(0)
 if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
 if train { ctrain = int32(1) }
cbidirectional := int32(0)
 if bidirectional { cbidirectional = int32(1) }
cbatchFirst := int32(0)
 if batchFirst { cbatchFirst = int32(1) }
lib.AtgRnnTanh(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}

return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// RnnTanhCell computes a single RNN cell step with tanh non-linearity.
func RnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// RnnTanhData is the packed-sequence variant of RnnTanh. Same multi-return
// pattern as RnnReluData.
func RnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))

var cparams []lib.Ctensor
 for _, t := range params {cparams = append(cparams, t.ctensor)}
chasBiases := int32(0)
 if hasBiases { chasBiases = int32(1) }
ctrain := int32(0)
 if train { ctrain = int32(1) }
cbidirectional := int32(0)
 if bidirectional { cbidirectional = int32(1) }
lib.AtgRnnTanhData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}

return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// Roll rolls the receiver's elements by the given shifts along the given
// dims. If del is true the receiver is dropped after the call.
func(ts *Tensor) Roll(shifts []int64, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 shiftsLen := len(shifts)
dimsLen := len(dims)
lib.AtgRoll(ptr, ts.ctensor, shifts, shiftsLen, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RollOut is the out-variant of Roll: the result is written into out.
func(ts *Tensor) RollOut(out *Tensor, shifts []int64, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 shiftsLen := len(shifts)
dimsLen := len(dims)
lib.AtgRollOut(ptr, out.ctensor, ts.ctensor, shifts, shiftsLen, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rot90 rotates the receiver k times by 90 degrees in the plane given by
// dims.
func(ts *Tensor) Rot90(k int64, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 dimsLen := len(dims)
lib.AtgRot90(ptr, ts.ctensor, k, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rot90Out is the out-variant of Rot90: the result is written into out.
func(ts *Tensor) Rot90Out(out *Tensor, k int64, dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 dimsLen := len(dims)
lib.AtgRot90Out(ptr, out.ctensor, ts.ctensor, k, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Round rounds each element of the receiver to the nearest integer.
func(ts *Tensor) Round(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRound(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Round_ is the in-place variant of Round; on success the receiver's
// ctensor handle is replaced.
func(ts *Tensor) Round_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRound_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// RoundDecimals rounds each element to the given number of decimal places.
func(ts *Tensor) RoundDecimals(decimals int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRoundDecimals(ptr, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RoundDecimals_ is the in-place variant of RoundDecimals.
func(ts *Tensor) RoundDecimals_(decimals int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRoundDecimals_(ptr, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// RoundDecimalsOut is the out-variant of RoundDecimals.
func(ts *Tensor) RoundDecimalsOut(out *Tensor, decimals int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRoundDecimalsOut(ptr, out.ctensor, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RoundOut is the out-variant of Round: the result is written into out.
func(ts *Tensor) RoundOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RowIndices returns the row-indices tensor of the receiver (sparse CSC
// layout accessor; semantics delegated to the C side).
func(ts *Tensor) RowIndices(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRowIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RowIndicesCopy returns a copy of the receiver's row-indices tensor.
func(ts *Tensor) RowIndicesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRowIndicesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RowIndicesCopyOut is the out-variant of RowIndicesCopy.
func(ts *Tensor) RowIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRowIndicesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RowStack stacks the given tensors row-wise. The Go tensor handles are
// collected into a []lib.Ctensor for the cgo call.
func RowStack(tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
 for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgRowStack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RowStackOut is the out-variant of RowStack.
func RowStackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
 for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgRowStackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rrelu applies the randomized leaky ReLU activation. The training flag is
// passed to C as an int32 0/1.
func(ts *Tensor) Rrelu(training bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 ctraining := int32(0)
 if training { ctraining = int32(1) }
lib.AtgRrelu(ptr, ts.ctensor, ctraining)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rrelu_ is the in-place variant of Rrelu; on success the receiver's
// ctensor handle is replaced.
func(ts *Tensor) Rrelu_(training bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
 if training { ctraining = int32(1) }
lib.AtgRrelu_(ptr, ts.ctensor, ctraining)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// RreluWithNoise applies rrelu using the provided noise tensor.
func(ts *Tensor) RreluWithNoise(noise *Tensor, training bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 ctraining := int32(0)
 if training { ctraining = int32(1) }
lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RreluWithNoise_ is the in-place variant of RreluWithNoise.
func(ts *Tensor) RreluWithNoise_(noise *Tensor, training bool)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
 if training { ctraining = int32(1) }
lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// RreluWithNoiseBackward computes the gradient of rrelu_with_noise given
// the activation bounds lower/upper and the training/selfIsResult flags.
func(ts *Tensor) RreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 ctraining := int32(0)
 if training { ctraining = int32(1) }
cselfIsResult := int32(0)
 if selfIsResult { cselfIsResult = int32(1) }
lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RreluWithNoiseBackwardOut is the out-variant of RreluWithNoiseBackward.
func(ts *Tensor) RreluWithNoiseBackwardOut(out *Tensor, gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 ctraining := int32(0)
 if training { ctraining = int32(1) }
cselfIsResult := int32(0)
 if selfIsResult { cselfIsResult = int32(1) }
lib.AtgRreluWithNoiseBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RreluWithNoiseOut is the out-variant of RreluWithNoise.
func(ts *Tensor) RreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 ctraining := int32(0)
 if training { ctraining = int32(1) }
lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rsqrt computes the reciprocal square root of each element.
func(ts *Tensor) Rsqrt(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRsqrt(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rsqrt_ is the in-place variant of Rsqrt; on success the receiver's
// ctensor handle is replaced.
func(ts *Tensor) Rsqrt_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRsqrt_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr

return err
}
// func.returns = `fixed 1`:
// --------------------------

// RsqrtOut is the out-variant of Rsqrt: the result is written into out.
func(ts *Tensor) RsqrtOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Rsub computes other - ts element-wise (reverse subtraction, delegated to
// the C side).
func(ts *Tensor) Rsub(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRsub(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RsubScalar is the scalar variant of Rsub.
func(ts *Tensor) RsubScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRsubScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RsubScalarOut is the out-variant of RsubScalar.
func(ts *Tensor) RsubScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRsubScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// RsubTensorOut is the out-variant of Rsub.
func(ts *Tensor) RsubTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
 ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
 
 lib.AtgRsubTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ScalarTensor creates a 0-dimensional tensor holding the scalar s, with
// the given dtype and device (both passed to C as their CInt codes).
func ScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ScalarTensorOut is the out-variant of ScalarTensor.
func ScalarTensorOut(out *Tensor, s *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScalarTensorOut(ptr, out.ctensor, s.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ScaledDotProductAttention computes attention over query/key/value with an
// optional attention mask, dropout probability dropoutP, and causal masking
// when isCausal is true (flag passed to C as int32 0/1).
func ScaledDotProductAttention(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor, dropoutP float64, isCausal bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cisCausal := int32(0)
 if isCausal { cisCausal = int32(1) }
lib.AtgScaledDotProductAttention(ptr, query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cisCausal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}

return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Scatter(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Scatter_(dim int64, index *Tensor, src *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterAdd(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterAdd_(dim int64, index *Tensor, src *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterReduce(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterReduce_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterSrcOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterValue(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValue(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ScatterValue_(dim int64, index *Tensor, value *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValue_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ScatterValueOut wraps lib.AtgScatterValueOut, scattering the scalar value into out;
// returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) ScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ScatterValueReduce wraps lib.AtgScatterValueReduce, scattering the scalar value with the given
// reduce mode; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) ScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueReduce(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ScatterValueReduce_ wraps lib.AtgScatterValueReduce_: in-place scatter of the scalar value with the
// given reduce mode; replaces the receiver's ctensor with the result.
func(ts *Tensor) ScatterValueReduce_(dim int64, index *Tensor, value *Scalar, reduce string)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueReduce_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ScatterValueReduceOut wraps lib.AtgScatterValueReduceOut, scattering the scalar value with the given
// reduce mode into out; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) ScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Searchsorted wraps lib.AtgSearchsorted with the receiver as the values argument; the Go bools
// outInt32 and right are marshalled to C as 0/1 int32 flags. Returns the result as a new Tensor
// and drops the receiver when del is true.
func(ts *Tensor) Searchsorted(sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgSearchsorted(ptr, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SearchsortedScalar wraps lib.AtgSearchsortedScalar for a scalar search value (free function, no
// receiver); outInt32 and right are marshalled to C as 0/1 int32 flags. Returns a new Tensor.
func SearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool, side string, sorter *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgSearchsortedScalar(ptr, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright, side, sorter.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SearchsortedScalarOut wraps lib.AtgSearchsortedScalarOut, writing the scalar-search result into
// out; outInt32 and right are marshalled to C as 0/1 int32 flags. Returns a new Tensor.
func SearchsortedScalarOut(out *Tensor, sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool, side string, sorter *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgSearchsortedScalarOut(ptr, out.ctensor, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright, side, sorter.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SearchsortedTensorOut wraps lib.AtgSearchsortedTensorOut, writing the search result into out;
// outInt32 and right are marshalled to C as 0/1 int32 flags. Returns the result as a new Tensor
// and drops the receiver when del is true.
func(ts *Tensor) SearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 { coutInt32 = int32(1) }
cright := int32(0)
if right { cright = int32(1) }
lib.AtgSearchsortedTensorOut(ptr, out.ctensor, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SegmentReduce wraps lib.AtgSegmentReduce (free function, no receiver); the unsafety bool is
// marshalled to C as a 0/1 int32 flag. Returns the result as a new Tensor.
func SegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, offsets *Tensor, axis int64, unsafety bool, initial *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunsafety := int32(0)
if unsafety { cunsafety = int32(1) }
lib.AtgSegmentReduce(ptr, data.ctensor, reduce, lengths.ctensor, indices.ctensor, offsets.ctensor, axis, cunsafety, initial.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SegmentReduceOut wraps lib.AtgSegmentReduceOut, writing the segment-reduce result into out;
// the unsafety bool is marshalled to C as a 0/1 int32 flag. Returns a new Tensor.
func SegmentReduceOut(out *Tensor, data *Tensor, reduce string, lengths *Tensor, indices *Tensor, offsets *Tensor, axis int64, unsafety bool, initial *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunsafety := int32(0)
if unsafety { cunsafety = int32(1) }
lib.AtgSegmentReduceOut(ptr, out.ctensor, data.ctensor, reduce, lengths.ctensor, indices.ctensor, offsets.ctensor, axis, cunsafety, initial.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Select wraps lib.AtgSelect, selecting at position index along dim; returns the result as a new
// Tensor and drops the receiver when del is true.
func(ts *Tensor) Select(dim int64, index int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelect(ptr, ts.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SelectBackward wraps lib.AtgSelectBackward (free function); inputSizes is passed to C as a
// pointer/length pair. Returns the result as a new Tensor.
func SelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputSizesLen := len(inputSizes)
lib.AtgSelectBackward(ptr, gradOutput.ctensor, inputSizes, inputSizesLen, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SelectBackwardOut wraps lib.AtgSelectBackwardOut, writing the backward result into out;
// inputSizes is passed to C as a pointer/length pair. Returns a new Tensor.
func SelectBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputSizesLen := len(inputSizes)
lib.AtgSelectBackwardOut(ptr, out.ctensor, gradOutput.ctensor, inputSizes, inputSizesLen, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SelectCopy wraps lib.AtgSelectCopy; returns the result as a new Tensor and drops the receiver
// when del is true.
func(ts *Tensor) SelectCopy(dim int64, index int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelectCopy(ptr, ts.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SelectCopyIntOut wraps lib.AtgSelectCopyIntOut, writing the select-copy result into out; returns
// the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SelectCopyIntOut(out *Tensor, dim int64, index int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelectCopyIntOut(ptr, out.ctensor, ts.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SelectScatter wraps lib.AtgSelectScatter, embedding src at position index along dim; returns the
// result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SelectScatter(src *Tensor, dim int64, index int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelectScatter(ptr, ts.ctensor, src.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SelectScatterOut wraps lib.AtgSelectScatterOut, writing the select-scatter result into out;
// returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SelectScatterOut(out *Tensor, src *Tensor, dim int64, index int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelectScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Selu wraps lib.AtgSelu; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Selu(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Selu_ wraps lib.AtgSelu_ (in-place selu), replacing the receiver's ctensor with the result.
func(ts *Tensor) Selu_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Set wraps lib.AtgSet; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Set(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Set_ wraps lib.AtgSet_ (in-place set), replacing the receiver's ctensor with the result.
func(ts *Tensor) Set_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSet_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SetOut wraps lib.AtgSetOut, writing the result into out; returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SetOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSetOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SetRequiresGrad wraps lib.AtgSetRequiresGrad; the Go bool r is marshalled to C as a 0/1 int32
// flag. Returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SetRequiresGrad(r bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cr := int32(0)
if r { cr = int32(1) }
lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SetSourceTensor wraps lib.AtgSetSourceTensor; returns the result as a new Tensor and drops the
// receiver when del is true.
func(ts *Tensor) SetSourceTensor(source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSetSourceTensor(ptr, ts.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SetSourceTensor_ wraps lib.AtgSetSourceTensor_ (in-place), replacing the receiver's ctensor with
// the result.
func(ts *Tensor) SetSourceTensor_(source *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSetSourceTensor_(ptr, ts.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SetSourceTensorOut wraps lib.AtgSetSourceTensorOut, writing the result into out; returns the
// result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SetSourceTensorOut(out *Tensor, source *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSetSourceTensorOut(ptr, out.ctensor, ts.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SetSourceTensorStorageOffset_ wraps lib.AtgSetSourceTensorStorageOffset_ (in-place); size and
// stride are passed to C as pointer/length pairs. Replaces the receiver's ctensor with the result.
func(ts *Tensor) SetSourceTensorStorageOffset_(source *Tensor, storageOffset int64, size []int64, stride []int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
strideLen := len(stride)
lib.AtgSetSourceTensorStorageOffset_(ptr, ts.ctensor, source.ctensor, storageOffset, size, sizeLen, stride, strideLen)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// Sgn wraps lib.AtgSgn; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Sgn(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSgn(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sgn_ wraps lib.AtgSgn_ (in-place sgn), replacing the receiver's ctensor with the result.
func(ts *Tensor) Sgn_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSgn_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SgnOut wraps lib.AtgSgnOut, writing the result into out; returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SgnOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSgnOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sigmoid wraps lib.AtgSigmoid; returns the result as a new Tensor and drops the receiver when del
// is true.
func(ts *Tensor) Sigmoid(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoid(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sigmoid_ wraps lib.AtgSigmoid_ (in-place sigmoid), replacing the receiver's ctensor with the result.
func(ts *Tensor) Sigmoid_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoid_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SigmoidBackward wraps lib.AtgSigmoidBackward (free function); returns the gradient as a new Tensor.
func SigmoidBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SigmoidBackwardGradInput wraps lib.AtgSigmoidBackwardGradInput, writing the gradient into
// gradInput; returns the result as a new Tensor.
func SigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SigmoidOut wraps lib.AtgSigmoidOut, writing the result into out; returns the result as a new
// Tensor and drops the receiver when del is true.
func(ts *Tensor) SigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sign wraps lib.AtgSign; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Sign(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSign(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sign_ wraps lib.AtgSign_ (in-place sign), replacing the receiver's ctensor with the result.
func(ts *Tensor) Sign_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSign_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SignOut wraps lib.AtgSignOut, writing the result into out; returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SignOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSignOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Signbit wraps lib.AtgSignbit; returns the result as a new Tensor and drops the receiver when del
// is true.
func(ts *Tensor) Signbit(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSignbit(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SignbitOut wraps lib.AtgSignbitOut, writing the result into out; returns the result as a new
// Tensor and drops the receiver when del is true.
func(ts *Tensor) SignbitOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSignbitOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Silu wraps lib.AtgSilu; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Silu(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSilu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Silu_ wraps lib.AtgSilu_ (in-place silu), replacing the receiver's ctensor with the result.
func(ts *Tensor) Silu_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSilu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SiluBackward wraps lib.AtgSiluBackward with the receiver as the forward input; returns the
// gradient as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SiluBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSiluBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SiluBackwardGradInput wraps lib.AtgSiluBackwardGradInput, writing the gradient into gradInput;
// returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSiluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SiluOut wraps lib.AtgSiluOut, writing the result into out; returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SiluOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSiluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sin wraps lib.AtgSin; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Sin(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sin_ wraps lib.AtgSin_ (in-place sin), replacing the receiver's ctensor with the result.
func(ts *Tensor) Sin_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSin_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SinOut wraps lib.AtgSinOut, writing the result into out; returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SinOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sinc wraps lib.AtgSinc; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Sinc(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sinc_ wraps lib.AtgSinc_ (in-place sinc), replacing the receiver's ctensor with the result.
func(ts *Tensor) Sinc_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinc_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SincOut wraps lib.AtgSincOut, writing the result into out; returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SincOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSincOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sinh wraps lib.AtgSinh; returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Sinh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sinh_ wraps lib.AtgSinh_ (in-place sinh), replacing the receiver's ctensor with the result.
func(ts *Tensor) Sinh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SinhOut wraps lib.AtgSinhOut, writing the result into out; returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SinhOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Slice wraps lib.AtgSlice. start and end model optional int64 values: an empty slice means
// "unset" (null flag 1 passed to C), otherwise only the first element is used. Returns the result
// as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) Slice(dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstartVal int64 = 0
var cstartNull int = 1
if len(start) > 0 {
cstartVal = start[0]
cstartNull = 0
}
var cendVal int64 = 0
var cendNull int = 1
if len(end) > 0 {
cendVal = end[0]
cendNull = 0
}
lib.AtgSlice(ptr, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SliceBackward wraps lib.AtgSliceBackward (free function); inputSizes is passed to C as a
// pointer/length pair. Returns the result as a new Tensor.
func SliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputSizesLen := len(inputSizes)
lib.AtgSliceBackward(ptr, gradOutput.ctensor, inputSizes, inputSizesLen, dim, start, end, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SliceBackwardOut wraps lib.AtgSliceBackwardOut, writing the backward result into out; inputSizes
// is passed to C as a pointer/length pair. Returns a new Tensor.
func SliceBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputSizesLen := len(inputSizes)
lib.AtgSliceBackwardOut(ptr, out.ctensor, gradOutput.ctensor, inputSizes, inputSizesLen, dim, start, end, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SliceCopy wraps lib.AtgSliceCopy. start and end model optional int64 values: an empty slice
// means "unset" (null flag 1 passed to C), otherwise only the first element is used. Returns the
// result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SliceCopy(dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstartVal int64 = 0
var cstartNull int = 1
if len(start) > 0 {
cstartVal = start[0]
cstartNull = 0
}
var cendVal int64 = 0
var cendNull int = 1
if len(end) > 0 {
cendVal = end[0]
cendNull = 0
}
lib.AtgSliceCopy(ptr, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SliceCopyTensorOut wraps lib.AtgSliceCopyTensorOut, writing the slice-copy result into out.
// start and end model optional int64 values: an empty slice means "unset" (null flag 1 passed to
// C), otherwise only the first element is used. Returns the result as a new Tensor and drops the
// receiver when del is true.
func(ts *Tensor) SliceCopyTensorOut(out *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstartVal int64 = 0
var cstartNull int = 1
if len(start) > 0 {
cstartVal = start[0]
cstartNull = 0
}
var cendVal int64 = 0
var cendNull int = 1
if len(end) > 0 {
cendVal = end[0]
cendNull = 0
}
lib.AtgSliceCopyTensorOut(ptr, out.ctensor, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SliceScatter wraps lib.AtgSliceScatter, embedding src into the receiver's slice. start and end
// model optional int64 values: an empty slice means "unset" (null flag 1 passed to C), otherwise
// only the first element is used. Returns the result as a new Tensor and drops the receiver when
// del is true.
func(ts *Tensor) SliceScatter(src *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstartVal int64 = 0
var cstartNull int = 1
if len(start) > 0 {
cstartVal = start[0]
cstartNull = 0
}
var cendVal int64 = 0
var cendNull int = 1
if len(end) > 0 {
cendVal = end[0]
cendNull = 0
}
lib.AtgSliceScatter(ptr, ts.ctensor, src.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SliceScatterOut wraps lib.AtgSliceScatterOut, writing the slice-scatter result into out. start
// and end model optional int64 values: an empty slice means "unset" (null flag 1 passed to C),
// otherwise only the first element is used. Returns the result as a new Tensor and drops the
// receiver when del is true.
func(ts *Tensor) SliceScatterOut(out *Tensor, src *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstartVal int64 = 0
var cstartNull int = 1
if len(start) > 0 {
cstartVal = start[0]
cstartNull = 0
}
var cendVal int64 = 0
var cendNull int = 1
if len(end) > 0 {
cendVal = end[0]
cendNull = 0
}
lib.AtgSliceScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Slogdet wraps lib.AtgSlogdet, which fills two consecutive Ctensor slots (sign and log-abs-det);
// ctensorPtr1 is derived by pointer arithmetic from ctensorPtr0. Returns both as new Tensors and
// drops the receiver when del is true.
// NOTE(review): ctensorPtr1 points one Ctensor past a C.malloc(0) allocation — this relies on the
// generator's allocation convention for multi-return ops; verify against the libtch ABI.
func(ts *Tensor) Slogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgSlogdet(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// SlogdetOut wraps lib.AtgSlogdetOut, writing into the sign and logabsdet out-tensors; the C call
// fills two consecutive Ctensor slots, the second derived by pointer arithmetic. Returns both as
// new Tensors and drops the receiver when del is true.
// NOTE(review): ctensorPtr1 points one Ctensor past a C.malloc(0) allocation — this relies on the
// generator's allocation convention for multi-return ops; verify against the libtch ABI.
func(ts *Tensor) SlogdetOut(sign *Tensor, logabsdet *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgSlogdetOut(ctensorPtr0, sign.ctensor, logabsdet.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// SlowConv3d wraps lib.AtgSlowConv3d; the int64 slice parameters are passed to C as
// pointer/length pairs. Returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SlowConv3dOut wraps lib.AtgSlowConv3dOut, writing the convolution result into out; the int64
// slice parameters are passed to C as pointer/length pairs. Returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SlowConvDilated2d wraps lib.AtgSlowConvDilated2d; the int64 slice parameters are passed to C as
// pointer/length pairs. Returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SlowConvDilated2dOut wraps lib.AtgSlowConvDilated2dOut, writing the result into out; the int64
// slice parameters are passed to C as pointer/length pairs. Returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SlowConvDilated2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgSlowConvDilated2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SlowConvDilated3d wraps lib.AtgSlowConvDilated3d; the int64 slice parameters are passed to C as
// pointer/length pairs. Returns the result as a new Tensor and drops the receiver when del is true.
func(ts *Tensor) SlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SlowConvDilated3dOut wraps lib.AtgSlowConvDilated3dOut, writing the result into out; the int64
// slice parameters are passed to C as pointer/length pairs. Returns the result as a new Tensor and
// drops the receiver when del is true.
func(ts *Tensor) SlowConvDilated3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
dilationLen := len(dilation)
lib.AtgSlowConvDilated3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SlowConvTranspose2d wraps lib.AtgSlowConvTranspose2d; the int64 slice parameters are passed to C
// as pointer/length pairs. Returns the result as a new Tensor and drops the receiver when del is
// true.
func(ts *Tensor) SlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
dilationLen := len(dilation)
lib.AtgSlowConvTranspose2d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) SlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
dilationLen := len(dilation)
lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) SlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
dilationLen := len(dilation)
lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) SlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
kernelSizeLen := len(kernelSize)
strideLen := len(stride)
paddingLen := len(padding)
outputPaddingLen := len(outputPadding)
dilationLen := len(dilation)
lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Smm wraps the C function AtgSmm on ts and mat2, returning the resulting
// tensor. If del is true, ts is dropped after the call.
func(ts *Tensor) Smm(mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SmoothL1Loss wraps AtgSmoothL1Loss over ts and target with the given
// reduction code and beta. If del is true, ts is dropped after the call.
func(ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SmoothL1LossBackward wraps AtgSmoothL1LossBackward; note gradOutput is passed
// to C before ts (the input). If del is true, ts is dropped after the call.
func(ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SmoothL1LossBackwardGradInput is the grad_input-out variant of
// SmoothL1LossBackward: the result is written into gradInput.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SmoothL1LossOut is the out-variant of SmoothL1Loss: the result is written
// into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftMarginLoss wraps AtgSoftMarginLoss over ts and target with the given
// reduction code. If del is true, ts is dropped after the call.
func(ts *Tensor) SoftMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftMarginLossBackward wraps AtgSoftMarginLossBackward; gradOutput is passed
// to C before ts (the input). If del is true, ts is dropped after the call.
func(ts *Tensor) SoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftMarginLossBackwardGradInput is the grad_input-out variant of
// SoftMarginLossBackward: the result is written into gradInput.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftMarginLossOut is the out-variant of SoftMarginLoss: the result is
// written into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Softmax wraps AtgSoftmax along dim, converting the result to dtype
// (passed to C as its integer code). If del is true, ts is dropped after the call.
func(ts *Tensor) Softmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftmaxIntOut is the out-variant of Softmax: the result is written into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SoftmaxIntOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftmaxIntOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Softplus wraps AtgSoftplus on ts, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Softplus(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplus(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftplusBackward wraps AtgSoftplusBackward with the beta and threshold
// scalars; gradOutput is passed to C before ts (the input).
// If del is true, ts is dropped after the call.
func(ts *Tensor) SoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftplusBackwardGradInput is the grad_input-out variant of SoftplusBackward:
// the result is written into gradInput. If del is true, ts is dropped after the call.
func(ts *Tensor) SoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplusBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftplusOut is the out-variant of Softplus: the result is written into out.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SoftplusOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Softshrink wraps AtgSoftshrink on ts, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Softshrink(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrink(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftshrinkBackward wraps AtgSoftshrinkBackward with the lambd scalar;
// gradOutput is passed to C before ts. If del is true, ts is dropped after the call.
func(ts *Tensor) SoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftshrinkBackwardGradInput is the grad_input-out variant of
// SoftshrinkBackward: the result is written into gradInput.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SoftshrinkOut is the out-variant of Softshrink: the result is written into
// out. If del is true, ts is dropped after the call.
func(ts *Tensor) SoftshrinkOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// Sort wraps AtgSort along dim, returning two tensors.
// The C call writes both tensor handles contiguously starting at ctensorPtr0,
// so ctensorPtr1 is computed as ctensorPtr0 + one pointer width.
// Go bool flags are converted to the C-side 0/1 int32 convention.
// If del is true, ts is dropped after the call.
func(ts *Tensor) Sort(dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cdescending := int32(0)
if descending { cdescending = int32(1) }
lib.AtgSort(ctensorPtr0, ts.ctensor, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// SortStable wraps AtgSortStable, additionally passing the stable flag
// (converted to 0/1 int32). Returns two tensors via the same
// contiguous-pointer convention as Sort. If del is true, ts is dropped after the call.
func(ts *Tensor) SortStable(stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cstable := int32(0)
if stable { cstable = int32(1) }
cdescending := int32(0)
if descending { cdescending = int32(1) }
lib.AtgSortStable(ctensorPtr0, ts.ctensor, cstable, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// SortValues is the out-variant of Sort: results are written into values and
// indices. Returns two tensors via the same contiguous-pointer convention as
// Sort. If del is true, ts is dropped after the call.
func(ts *Tensor) SortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cdescending := int32(0)
if descending { cdescending = int32(1) }
lib.AtgSortValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// SortValuesStable is the out-variant of SortStable: results are written into
// values and indices, with the stable flag passed as 0/1 int32.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cstable := int32(0)
if stable { cstable = int32(1) }
cdescending := int32(0)
if descending { cdescending = int32(1) }
lib.AtgSortValuesStable(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, cstable, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseBscTensor wraps AtgSparseBscTensor, constructing a tensor from the
// given index/value tensors with the requested dtype and device (passed to C
// as their integer codes).
func SparseBscTensor(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseBscTensor(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseBscTensorCcolRowValueSize is the variant of SparseBscTensor that also
// takes an explicit size.
func SparseBscTensorCcolRowValueSize(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseBscTensorCcolRowValueSize(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseBsrTensor wraps AtgSparseBsrTensor, constructing a tensor from the
// given index/value tensors with the requested dtype and device.
func SparseBsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseBsrTensor(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseBsrTensorCrowColValueSize is the variant of SparseBsrTensor that also
// takes an explicit size.
func SparseBsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseBsrTensorCrowColValueSize(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCompressedTensor wraps AtgSparseCompressedTensor, constructing a
// tensor from the given index/value tensors with the requested dtype/device.
func SparseCompressedTensor(compressedIndices *Tensor, plainIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCompressedTensor(ptr, compressedIndices.ctensor, plainIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCompressedTensorCompPlainValueSize is the variant of
// SparseCompressedTensor that also takes an explicit size.
func SparseCompressedTensorCompPlainValueSize(compressedIndices *Tensor, plainIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseCompressedTensorCompPlainValueSize(ptr, compressedIndices.ctensor, plainIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCooTensor wraps AtgSparseCooTensor, constructing a tensor of the
// given size with the requested dtype and device.
func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseCooTensor(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCooTensorIndices wraps AtgSparseCooTensorIndices, constructing a
// tensor from indices and values with the requested dtype and device.
func SparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCooTensorIndices(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCooTensorIndicesSize is the variant of SparseCooTensorIndices that
// also takes an explicit size.
func SparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseCooTensorIndicesSize(ptr, indices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCooTensorSizeOut wraps AtgSparseCooTensorSizeOut, writing the result
// into out.
func SparseCooTensorSizeOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseCooTensorSizeOut(ptr, out.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCscTensor wraps AtgSparseCscTensor, constructing a tensor from the
// given index/value tensors with the requested dtype and device.
func SparseCscTensor(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCscTensor(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCscTensorCcolRowValueSize is the variant of SparseCscTensor that also
// takes an explicit size.
func SparseCscTensorCcolRowValueSize(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseCscTensorCcolRowValueSize(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCsrTensor wraps AtgSparseCsrTensor, constructing a tensor from the
// given index/value tensors with the requested dtype and device.
func SparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCsrTensor(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseCsrTensorCrowColValueSize is the variant of SparseCsrTensor that also
// takes an explicit size.
func SparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseCsrTensorCrowColValueSize(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `int64`:
// --------------------------
// SparseDim wraps AtgSparseDim, which returns an int64 directly (no tensor
// handle is allocated). If del is true, ts is dropped after the call.
func(ts *Tensor) SparseDim(del bool)(retVal int64, err error) {
if del { defer ts.MustDrop() }
retVal = lib.AtgSparseDim(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseMask wraps AtgSparseMask on ts and mask, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SparseMask(mask *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseMaskOut is the out-variant of SparseMask: the result is written into
// out. If del is true, ts is dropped after the call.
func(ts *Tensor) SparseMaskOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseMaskOut(ptr, out.ctensor, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseResize wraps AtgSparseResize with the given size, sparseDim and
// denseDim, returning a new tensor. If del is true, ts is dropped after the call.
func(ts *Tensor) SparseResize(size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseResize(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseResize_ is the in-place variant of SparseResize: ts.ctensor is
// replaced with the handle returned by the C call, so the receiver is mutated.
func(ts *Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseResize_(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseResizeAndClear wraps AtgSparseResizeAndClear, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SparseResizeAndClear(size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseResizeAndClear(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseResizeAndClear_ is the in-place variant of SparseResizeAndClear:
// ts.ctensor is replaced with the handle returned by the C call.
func(ts *Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseResizeAndClearOut is the out-variant of SparseResizeAndClear: the
// result is written into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SparseResizeAndClearOut(out *Tensor, size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseResizeAndClearOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, sparseDim, denseDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseResizeOut is the out-variant of SparseResize: the result is written
// into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SparseResizeOut(out *Tensor, size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgSparseResizeOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, sparseDim, denseDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseSampledAddmm wraps AtgSparseSampledAddmm on ts, mat1 and mat2,
// returning a new tensor. If del is true, ts is dropped after the call.
func(ts *Tensor) SparseSampledAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseSampledAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SparseSampledAddmmOut is the out-variant of SparseSampledAddmm: the result
// is written into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SparseSampledAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseSampledAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialAiryAi wraps AtgSpecialAiryAi on x, returning a new tensor.
func SpecialAiryAi(x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialAiryAi(ptr, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialAiryAiOut is the out-variant of SpecialAiryAi: the result is written
// into out.
func SpecialAiryAiOut(out *Tensor, x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialAiryAiOut(ptr, out.ctensor, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselJ0 wraps AtgSpecialBesselJ0 on ts, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselJ0(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselJ0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselJ0Out is the out-variant of SpecialBesselJ0: the result is
// written into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselJ0Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselJ0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselJ1 wraps AtgSpecialBesselJ1 on ts, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselJ1(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselJ1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselJ1Out is the out-variant of SpecialBesselJ1: the result is
// written into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselJ1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselJ1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselY0 wraps AtgSpecialBesselY0 on ts, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselY0(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselY0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselY0Out is the out-variant of SpecialBesselY0: the result is
// written into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselY0Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselY0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselY1 wraps AtgSpecialBesselY1 on ts, returning a new tensor.
// If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselY1(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselY1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialBesselY1Out is the out-variant of SpecialBesselY1: the result is
// written into out. If del is true, ts is dropped after the call.
func(ts *Tensor) SpecialBesselY1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialBesselY1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func SpecialChebyshevPolynomialT(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialT(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func SpecialChebyshevPolynomialTNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialTNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func SpecialChebyshevPolynomialTNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialTNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func SpecialChebyshevPolynomialTOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialTOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func SpecialChebyshevPolynomialTXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialTXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func SpecialChebyshevPolynomialTXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialTXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialU wraps lib.AtgSpecialChebyshevPolynomialU over
// tensors x and n. It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialU(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialU(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialUNScalar wraps lib.AtgSpecialChebyshevPolynomialUNScalar
// over tensor x and scalar n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialChebyshevPolynomialUNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialUNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialUNScalarOut wraps lib.AtgSpecialChebyshevPolynomialUNScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialUNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialUNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialUOut wraps lib.AtgSpecialChebyshevPolynomialUOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialUOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialUOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialUXScalar wraps lib.AtgSpecialChebyshevPolynomialUXScalar
// over scalar x and tensor n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialChebyshevPolynomialUXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialUXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialUXScalarOut wraps lib.AtgSpecialChebyshevPolynomialUXScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialUXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialUXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialV wraps lib.AtgSpecialChebyshevPolynomialV over
// tensors x and n. It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialV(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialV(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialVNScalar wraps lib.AtgSpecialChebyshevPolynomialVNScalar
// over tensor x and scalar n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialChebyshevPolynomialVNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialVNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialVNScalarOut wraps lib.AtgSpecialChebyshevPolynomialVNScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialVNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialVNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialVOut wraps lib.AtgSpecialChebyshevPolynomialVOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialVOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialVOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialVXScalar wraps lib.AtgSpecialChebyshevPolynomialVXScalar
// over scalar x and tensor n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialChebyshevPolynomialVXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialVXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialVXScalarOut wraps lib.AtgSpecialChebyshevPolynomialVXScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialVXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialVXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialW wraps lib.AtgSpecialChebyshevPolynomialW over
// tensors x and n. It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialW(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialW(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialWNScalar wraps lib.AtgSpecialChebyshevPolynomialWNScalar
// over tensor x and scalar n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialChebyshevPolynomialWNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialWNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialWNScalarOut wraps lib.AtgSpecialChebyshevPolynomialWNScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialWNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialWNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialWOut wraps lib.AtgSpecialChebyshevPolynomialWOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialWOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialWOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialWXScalar wraps lib.AtgSpecialChebyshevPolynomialWXScalar
// over scalar x and tensor n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialChebyshevPolynomialWXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialWXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialChebyshevPolynomialWXScalarOut wraps lib.AtgSpecialChebyshevPolynomialWXScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialChebyshevPolynomialWXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialChebyshevPolynomialWXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialDigamma wraps lib.AtgSpecialDigamma on the receiver tensor; it returns
// the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialDigamma(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialDigamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialDigammaOut wraps lib.AtgSpecialDigammaOut on the receiver, with out as
// the leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialDigammaOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialDigammaOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialEntr wraps lib.AtgSpecialEntr on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialEntr(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialEntr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialEntrOut wraps lib.AtgSpecialEntrOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialEntrOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialEntrOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErf wraps lib.AtgSpecialErf on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErf(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErfOut wraps lib.AtgSpecialErfOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErfc wraps lib.AtgSpecialErfc on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErfc(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErfcOut wraps lib.AtgSpecialErfcOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErfcOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfcOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErfcx wraps lib.AtgSpecialErfcx on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErfcx(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfcx(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErfcxOut wraps lib.AtgSpecialErfcxOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErfcxOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfcxOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErfinv wraps lib.AtgSpecialErfinv on the receiver tensor; it returns
// the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErfinv(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfinv(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialErfinvOut wraps lib.AtgSpecialErfinvOut on the receiver, with out as
// the leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialErfinvOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfinvOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialExp2 wraps lib.AtgSpecialExp2 on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialExp2(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExp2(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialExp2Out wraps lib.AtgSpecialExp2Out on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialExp2Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExp2Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialExpit wraps lib.AtgSpecialExpit on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialExpit(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpit(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialExpitOut wraps lib.AtgSpecialExpitOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialExpitOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpitOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialExpm1 wraps lib.AtgSpecialExpm1 on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialExpm1(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpm1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialExpm1Out wraps lib.AtgSpecialExpm1Out on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialExpm1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpm1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialGammainc wraps lib.AtgSpecialGammainc on the receiver tensor and a
// second tensor other; it returns the resulting tensor, or the error reported
// by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialGammainc(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammainc(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialGammaincOut wraps lib.AtgSpecialGammaincOut on the receiver and other,
// with out as the leading (output) tensor; it returns the resulting tensor, or
// the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialGammaincOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammaincOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialGammaincc wraps lib.AtgSpecialGammaincc on the receiver tensor and a
// second tensor other; it returns the resulting tensor, or the error reported
// by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialGammaincc(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammaincc(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialGammainccOut wraps lib.AtgSpecialGammainccOut on the receiver and
// other, with out as the leading (output) tensor; it returns the resulting
// tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialGammainccOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammainccOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialGammaln wraps lib.AtgSpecialGammaln on the receiver tensor; it returns
// the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialGammaln(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammaln(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialGammalnOut wraps lib.AtgSpecialGammalnOut on the receiver, with out as
// the leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialGammalnOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammalnOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialH wraps lib.AtgSpecialHermitePolynomialH over tensors
// x and n. It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialH(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialH(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHNScalar wraps lib.AtgSpecialHermitePolynomialHNScalar
// over tensor x and scalar n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialHermitePolynomialHNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHNScalarOut wraps lib.AtgSpecialHermitePolynomialHNScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialHNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHOut wraps lib.AtgSpecialHermitePolynomialHOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialHOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHXScalar wraps lib.AtgSpecialHermitePolynomialHXScalar
// over scalar x and tensor n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialHermitePolynomialHXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHXScalarOut wraps lib.AtgSpecialHermitePolynomialHXScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialHXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHe wraps lib.AtgSpecialHermitePolynomialHe over
// tensors x and n. It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialHe(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHe(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHeNScalar wraps lib.AtgSpecialHermitePolynomialHeNScalar
// over tensor x and scalar n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialHermitePolynomialHeNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHeNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHeNScalarOut wraps lib.AtgSpecialHermitePolynomialHeNScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialHeNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHeNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHeOut wraps lib.AtgSpecialHermitePolynomialHeOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialHeOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHeOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHeXScalar wraps lib.AtgSpecialHermitePolynomialHeXScalar
// over scalar x and tensor n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialHermitePolynomialHeXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHeXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialHermitePolynomialHeXScalarOut wraps lib.AtgSpecialHermitePolynomialHeXScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialHermitePolynomialHeXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialHermitePolynomialHeXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI0 wraps lib.AtgSpecialI0 on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI0(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI0Out wraps lib.AtgSpecialI0Out on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI0Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI0e wraps lib.AtgSpecialI0e on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI0e(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0e(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI0eOut wraps lib.AtgSpecialI0eOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI0eOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0eOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI1 wraps lib.AtgSpecialI1 on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI1(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI1Out wraps lib.AtgSpecialI1Out on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI1e wraps lib.AtgSpecialI1e on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI1e(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1e(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialI1eOut wraps lib.AtgSpecialI1eOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialI1eOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1eOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLaguerrePolynomialL wraps lib.AtgSpecialLaguerrePolynomialL over
// tensors x and n. It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLaguerrePolynomialL(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLaguerrePolynomialL(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLaguerrePolynomialLNScalar wraps lib.AtgSpecialLaguerrePolynomialLNScalar
// over tensor x and scalar n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialLaguerrePolynomialLNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLaguerrePolynomialLNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLaguerrePolynomialLNScalarOut wraps lib.AtgSpecialLaguerrePolynomialLNScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLaguerrePolynomialLNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLaguerrePolynomialLNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLaguerrePolynomialLOut wraps lib.AtgSpecialLaguerrePolynomialLOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLaguerrePolynomialLOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLaguerrePolynomialLOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLaguerrePolynomialLXScalar wraps lib.AtgSpecialLaguerrePolynomialLXScalar
// over scalar x and tensor n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialLaguerrePolynomialLXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLaguerrePolynomialLXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLaguerrePolynomialLXScalarOut wraps lib.AtgSpecialLaguerrePolynomialLXScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLaguerrePolynomialLXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLaguerrePolynomialLXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLegendrePolynomialP wraps lib.AtgSpecialLegendrePolynomialP over
// tensors x and n. It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLegendrePolynomialP(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLegendrePolynomialP(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLegendrePolynomialPNScalar wraps lib.AtgSpecialLegendrePolynomialPNScalar
// over tensor x and scalar n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialLegendrePolynomialPNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLegendrePolynomialPNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLegendrePolynomialPNScalarOut wraps lib.AtgSpecialLegendrePolynomialPNScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLegendrePolynomialPNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLegendrePolynomialPNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLegendrePolynomialPOut wraps lib.AtgSpecialLegendrePolynomialPOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLegendrePolynomialPOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLegendrePolynomialPOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLegendrePolynomialPXScalar wraps lib.AtgSpecialLegendrePolynomialPXScalar
// over scalar x and tensor n. It returns the resulting tensor, or the error
// reported by TorchErr.
func SpecialLegendrePolynomialPXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLegendrePolynomialPXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLegendrePolynomialPXScalarOut wraps lib.AtgSpecialLegendrePolynomialPXScalarOut,
// passing out as the leading (output) tensor per the generated *Out convention.
// It returns the resulting tensor, or the error reported by TorchErr.
func SpecialLegendrePolynomialPXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLegendrePolynomialPXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLog1p wraps lib.AtgSpecialLog1p on the receiver tensor; it returns the
// resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLog1p(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLog1p(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLog1pOut wraps lib.AtgSpecialLog1pOut on the receiver, with out as the
// leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLog1pOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLog1pOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLogNdtr wraps lib.AtgSpecialLogNdtr on the receiver tensor; it returns
// the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLogNdtr(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLogNdtr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLogNdtrOut wraps lib.AtgSpecialLogNdtrOut on the receiver, with out as
// the leading (output) tensor; it returns the resulting tensor, or the error
// reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLogNdtrOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLogNdtrOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLogSoftmax wraps lib.AtgSpecialLogSoftmax on the receiver tensor,
// passing dim and the dtype converted to its C integer code (dtype.CInt());
// it returns the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLogit wraps lib.AtgSpecialLogit on the receiver tensor. eps encodes an
// optional value: an empty slice means "no eps" (the null flag is set for the
// C call); otherwise only eps[0] is used.
// It returns the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLogit(eps []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Encode the optional eps as a (value, isNull) pair for the C side.
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgSpecialLogit(ptr, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLogitOut wraps lib.AtgSpecialLogitOut on the receiver, with out as the
// leading (output) tensor. eps encodes an optional value: an empty slice means
// "no eps" (the null flag is set for the C call); otherwise only eps[0] is used.
// It returns the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Encode the optional eps as a (value, isNull) pair for the C side.
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgSpecialLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLogsumexp wraps lib.AtgSpecialLogsumexp on the receiver tensor,
// passing the dim slice with its length and keepdim converted to a C int32
// flag (1 = true, 0 = false).
// It returns the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
// keepdim is passed to C as an int32 flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgSpecialLogsumexp(ptr, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialLogsumexpOut wraps lib.AtgSpecialLogsumexpOut on the receiver, with
// out as the leading (output) tensor, passing the dim slice with its length
// and keepdim converted to a C int32 flag (1 = true, 0 = false).
// It returns the resulting tensor, or the error reported by TorchErr.
// When del is true the receiver is released via MustDrop once this call returns.
func(ts *Tensor) SpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
// keepdim is passed to C as an int32 flag.
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgSpecialLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselI0 is the generated binding for
// lib.AtgSpecialModifiedBesselI0. If del is true the receiver is dropped
// after the call.
func(ts *Tensor) SpecialModifiedBesselI0(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselI0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselI0Out is the out-tensor variant of
// SpecialModifiedBesselI0 (binding for lib.AtgSpecialModifiedBesselI0Out).
// If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialModifiedBesselI0Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselI0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselI1 is the generated binding for
// lib.AtgSpecialModifiedBesselI1. If del is true the receiver is dropped
// after the call.
func(ts *Tensor) SpecialModifiedBesselI1(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselI1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselI1Out is the out-tensor variant of
// SpecialModifiedBesselI1 (binding for lib.AtgSpecialModifiedBesselI1Out).
// If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialModifiedBesselI1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselI1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselK0 is the generated binding for
// lib.AtgSpecialModifiedBesselK0. If del is true the receiver is dropped
// after the call.
func(ts *Tensor) SpecialModifiedBesselK0(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselK0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselK0Out is the out-tensor variant of
// SpecialModifiedBesselK0 (binding for lib.AtgSpecialModifiedBesselK0Out).
// If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialModifiedBesselK0Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselK0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselK1 is the generated binding for
// lib.AtgSpecialModifiedBesselK1. If del is true the receiver is dropped
// after the call.
func(ts *Tensor) SpecialModifiedBesselK1(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselK1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialModifiedBesselK1Out is the out-tensor variant of
// SpecialModifiedBesselK1 (binding for lib.AtgSpecialModifiedBesselK1Out).
// If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialModifiedBesselK1Out(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialModifiedBesselK1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialMultigammaln is the generated binding for
// lib.AtgSpecialMultigammaln with integer order p. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) SpecialMultigammaln(p int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialMultigammaln(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialMultigammalnOut is the out-tensor variant of SpecialMultigammaln
// (binding for lib.AtgSpecialMultigammalnOut). If del is true the receiver
// is dropped after the call.
func(ts *Tensor) SpecialMultigammalnOut(out *Tensor, p int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialMultigammalnOut(ptr, out.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialNdtr is the generated binding for lib.AtgSpecialNdtr. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) SpecialNdtr(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialNdtrOut is the out-tensor variant of SpecialNdtr (binding for
// lib.AtgSpecialNdtrOut). If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialNdtrOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtrOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialNdtri is the generated binding for lib.AtgSpecialNdtri. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) SpecialNdtri(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtri(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialNdtriOut is the out-tensor variant of SpecialNdtri (binding for
// lib.AtgSpecialNdtriOut). If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialNdtriOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtriOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialPolygamma is the generated binding for lib.AtgSpecialPolygamma.
// Note the C call takes the order n BEFORE the tensor (generator preserves
// the native argument order). If del is true the receiver is dropped after
// the call.
func(ts *Tensor) SpecialPolygamma(n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPolygamma(ptr, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialPolygammaOut is the out-tensor variant of SpecialPolygamma
// (binding for lib.AtgSpecialPolygammaOut; order n precedes the input
// tensor). If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPolygammaOut(ptr, out.ctensor, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialPsi is the generated binding for lib.AtgSpecialPsi. If del is true
// the receiver is dropped after the call.
func(ts *Tensor) SpecialPsi(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPsi(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialPsiOut is the out-tensor variant of SpecialPsi (binding for
// lib.AtgSpecialPsiOut). If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialPsiOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPsiOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialRound is the generated binding for lib.AtgSpecialRound with a
// decimals argument. If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialRound(decimals int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialRound(ptr, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialRoundOut is the out-tensor variant of SpecialRound (binding for
// lib.AtgSpecialRoundOut). If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialRoundOut(out *Tensor, decimals int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialRoundOut(ptr, out.ctensor, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialScaledModifiedBesselK0 is the generated free-function binding for
// lib.AtgSpecialScaledModifiedBesselK0, applied to tensor x.
func SpecialScaledModifiedBesselK0(x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialScaledModifiedBesselK0(ptr, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialScaledModifiedBesselK0Out is the out-tensor variant of
// SpecialScaledModifiedBesselK0 (binding for
// lib.AtgSpecialScaledModifiedBesselK0Out).
func SpecialScaledModifiedBesselK0Out(out *Tensor, x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialScaledModifiedBesselK0Out(ptr, out.ctensor, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialScaledModifiedBesselK1 is the generated free-function binding for
// lib.AtgSpecialScaledModifiedBesselK1, applied to tensor x.
func SpecialScaledModifiedBesselK1(x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialScaledModifiedBesselK1(ptr, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialScaledModifiedBesselK1Out is the out-tensor variant of
// SpecialScaledModifiedBesselK1 (binding for
// lib.AtgSpecialScaledModifiedBesselK1Out).
func SpecialScaledModifiedBesselK1Out(out *Tensor, x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialScaledModifiedBesselK1Out(ptr, out.ctensor, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialT is the generated binding for
// lib.AtgSpecialShiftedChebyshevPolynomialT with tensor inputs x and n.
func SpecialShiftedChebyshevPolynomialT(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialT(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialTNScalar is the overload of
// SpecialShiftedChebyshevPolynomialT where n is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialTNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialTNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialTNScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialTNScalar.
func SpecialShiftedChebyshevPolynomialTNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialTNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialTOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialT.
func SpecialShiftedChebyshevPolynomialTOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialTOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialTXScalar is the overload of
// SpecialShiftedChebyshevPolynomialT where x is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialTXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialTXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialTXScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialTXScalar.
func SpecialShiftedChebyshevPolynomialTXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialTXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialU is the generated binding for
// lib.AtgSpecialShiftedChebyshevPolynomialU with tensor inputs x and n.
func SpecialShiftedChebyshevPolynomialU(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialU(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialUNScalar is the overload of
// SpecialShiftedChebyshevPolynomialU where n is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialUNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialUNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialUNScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialUNScalar.
func SpecialShiftedChebyshevPolynomialUNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialUNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialUOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialU.
func SpecialShiftedChebyshevPolynomialUOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialUOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialUXScalar is the overload of
// SpecialShiftedChebyshevPolynomialU where x is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialUXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialUXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialUXScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialUXScalar.
func SpecialShiftedChebyshevPolynomialUXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialUXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialV is the generated binding for
// lib.AtgSpecialShiftedChebyshevPolynomialV with tensor inputs x and n.
func SpecialShiftedChebyshevPolynomialV(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialV(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialVNScalar is the overload of
// SpecialShiftedChebyshevPolynomialV where n is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialVNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialVNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialVNScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialVNScalar.
func SpecialShiftedChebyshevPolynomialVNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialVNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialVOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialV.
func SpecialShiftedChebyshevPolynomialVOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialVOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialVXScalar is the overload of
// SpecialShiftedChebyshevPolynomialV where x is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialVXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialVXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialVXScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialVXScalar.
func SpecialShiftedChebyshevPolynomialVXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialVXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialW is the generated binding for
// lib.AtgSpecialShiftedChebyshevPolynomialW with tensor inputs x and n.
func SpecialShiftedChebyshevPolynomialW(x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialW(ptr, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialWNScalar is the overload of
// SpecialShiftedChebyshevPolynomialW where n is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialWNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialWNScalar(ptr, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialWNScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialWNScalar.
func SpecialShiftedChebyshevPolynomialWNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialWNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialWOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialW.
func SpecialShiftedChebyshevPolynomialWOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialWOut(ptr, out.ctensor, x.ctensor, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialWXScalar is the overload of
// SpecialShiftedChebyshevPolynomialW where x is a Scalar instead of a
// Tensor.
func SpecialShiftedChebyshevPolynomialWXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialWXScalar(ptr, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialShiftedChebyshevPolynomialWXScalarOut is the out-tensor variant of
// SpecialShiftedChebyshevPolynomialWXScalar.
func SpecialShiftedChebyshevPolynomialWXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialShiftedChebyshevPolynomialWXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialSinc is the generated binding for lib.AtgSpecialSinc. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) SpecialSinc(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSinc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialSincOut is the out-tensor variant of SpecialSinc (binding for
// lib.AtgSpecialSincOut). If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialSincOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSincOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialSoftmax is the generated binding for lib.AtgSpecialSoftmax along
// dimension dim; dtype selects the result element type (passed as its C
// int code). If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialSphericalBesselJ0 is the generated free-function binding for
// lib.AtgSpecialSphericalBesselJ0, applied to tensor x.
func SpecialSphericalBesselJ0(x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSphericalBesselJ0(ptr, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialSphericalBesselJ0Out is the out-tensor variant of
// SpecialSphericalBesselJ0 (binding for lib.AtgSpecialSphericalBesselJ0Out).
func SpecialSphericalBesselJ0Out(out *Tensor, x *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSphericalBesselJ0Out(ptr, out.ctensor, x.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlog1py is the generated binding for lib.AtgSpecialXlog1py with a
// tensor `other` operand. If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialXlog1py(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1py(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlog1pyOtherScalar is the overload of SpecialXlog1py where `other`
// is a Scalar. If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialXlog1pyOtherScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pyOtherScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlog1pyOtherScalarOut is the out-tensor variant of
// SpecialXlog1pyOtherScalar. If del is true the receiver is dropped after
// the call.
func(ts *Tensor) SpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlog1pyOut is the out-tensor variant of SpecialXlog1py (binding
// for lib.AtgSpecialXlog1pyOut). If del is true the receiver is dropped
// after the call.
func(ts *Tensor) SpecialXlog1pyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlog1pySelfScalar is the free-function overload of SpecialXlog1py
// where the first ("self") operand is a Scalar rather than a Tensor.
func SpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pySelfScalar(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlog1pySelfScalarOut is the out-tensor variant of
// SpecialXlog1pySelfScalar.
func SpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlogy is the generated binding for lib.AtgSpecialXlogy with a
// tensor `other` operand. If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialXlogy(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogy(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlogyOtherScalar is the overload of SpecialXlogy where `other` is
// a Scalar. If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialXlogyOtherScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogyOtherScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlogyOtherScalarOut is the out-tensor variant of
// SpecialXlogyOtherScalar. If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlogyOut is the out-tensor variant of SpecialXlogy (binding for
// lib.AtgSpecialXlogyOut). If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialXlogyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlogySelfScalar is the free-function overload of SpecialXlogy
// where the first ("self") operand is a Scalar rather than a Tensor.
func SpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogySelfScalar(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialXlogySelfScalarOut is the out-tensor variant of
// SpecialXlogySelfScalar.
func SpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialZeta is the generated binding for lib.AtgSpecialZeta with a tensor
// `other` operand. If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialZeta(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZeta(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialZetaOtherScalar is the overload of SpecialZeta where `other` is a
// Scalar. If del is true the receiver is dropped after the call.
func(ts *Tensor) SpecialZetaOtherScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaOtherScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialZetaOtherScalarOut is the out-tensor variant of
// SpecialZetaOtherScalar. If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialZetaOut is the out-tensor variant of SpecialZeta (binding for
// lib.AtgSpecialZetaOut). If del is true the receiver is dropped after the
// call.
func(ts *Tensor) SpecialZetaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialZetaSelfScalar is the free-function overload of SpecialZeta where
// the first ("self") operand is a Scalar rather than a Tensor.
func SpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaSelfScalar(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SpecialZetaSelfScalarOut is the out-tensor variant of
// SpecialZetaSelfScalar.
func SpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaSelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sqrt is the generated binding for lib.AtgSqrt, returning a new tensor.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) Sqrt(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqrt(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Sqrt_ is the in-place variant of Sqrt (binding for lib.AtgSqrt_): on
// success the receiver's internal C tensor handle is replaced by the one
// the C call returned.
func(ts *Tensor) Sqrt_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqrt_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SqrtOut is the out-tensor variant of Sqrt (binding for lib.AtgSqrtOut).
// If del is true the receiver is dropped after the call.
func(ts *Tensor) SqrtOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Square is the generated binding for lib.AtgSquare, returning a new
// tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) Square(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSquare(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Square_ is the in-place variant of Square (binding for lib.AtgSquare_):
// on success the receiver's internal C tensor handle is replaced by the one
// the C call returned.
func(ts *Tensor) Square_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSquare_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SquareOut is the out-tensor variant of Square (binding for
// lib.AtgSquareOut). If del is true the receiver is dropped after the call.
func(ts *Tensor) SquareOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSquareOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Squeeze is the generated binding for lib.AtgSqueeze, returning a new
// tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) Squeeze(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueeze(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Squeeze_ is the in-place variant of Squeeze (binding for
// lib.AtgSqueeze_): on success the receiver's internal C tensor handle is
// replaced by the one the C call returned.
func(ts *Tensor) Squeeze_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueeze_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// SqueezeCopy is the generated binding for lib.AtgSqueezeCopy. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) SqueezeCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueezeCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SqueezeCopyDim is the single-dimension overload of SqueezeCopy (binding
// for lib.AtgSqueezeCopyDim). If del is true the receiver is dropped after
// the call.
func(ts *Tensor) SqueezeCopyDim(dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueezeCopyDim(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// SqueezeCopyDimOut is the out-tensor variant of SqueezeCopyDim (binding
// for lib.AtgSqueezeCopyDimOut). If del is true the receiver is dropped
// after the call.
func(ts *Tensor) SqueezeCopyDimOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueezeCopyDimOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) SqueezeCopyDims(dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.AtgSqueezeCopyDims(ptr, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) SqueezeCopyDimsOut(out *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
lib.AtgSqueezeCopyDimsOut(ptr, out.ctensor, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SqueezeDim wraps lib.AtgSqueezeDim. If del is true, the receiver is
// dropped when the call returns.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) SqueezeDim(dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSqueezeDim(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SqueezeDim_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) SqueezeDim_(dim int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSqueezeDim_(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// SqueezeDims wraps lib.AtgSqueezeDims for a list of dimensions.
func(ts *Tensor) SqueezeDims(dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
lib.AtgSqueezeDims(ptr, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SqueezeDims_ is the in-place variant of SqueezeDims.
func(ts *Tensor) SqueezeDims_(dim []int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
lib.AtgSqueezeDims_(ptr, ts.ctensor, dim, dimLen)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// Sspaddmm wraps lib.AtgSspaddmm. If del is true, the receiver is dropped
// when the call returns.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) Sspaddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SspaddmmOut writes into the caller-provided out tensor.
func(ts *Tensor) SspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Stack wraps lib.AtgStack over a slice of tensors along dim.
func Stack(tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgStack(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// StackOut writes into the caller-provided out tensor.
func StackOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Std wraps lib.AtgStd. Booleans are marshalled to C int32 (0/1).
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) Std(unbiased bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
lib.AtgStd(ptr, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// StdCorrection wraps lib.AtgStdCorrection. `correction` is an optional
// value encoded as a slice: empty means null (val/null flag pair on the
// C side).
func(ts *Tensor) StdCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgStdCorrection(ptr, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// StdCorrectionOut writes into the caller-provided out tensor.
func(ts *Tensor) StdCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgStdCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// StdDim wraps lib.AtgStdDim over the given dimensions.
func(ts *Tensor) StdDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgStdDim(ptr, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// StdMean wraps lib.AtgStdMean, which fills two consecutive tensor-handle
// slots starting at ctensorPtr0.
//
// NOTE(review): these two-tensor wrappers allocated the result array with
// C.malloc(0) and then had the C side write TWO pointer-sized handles into
// it — an out-of-bounds write (undefined behavior) — and the allocation
// was never freed. Allocate two slots and free them after the handles have
// been copied out.
func(ts *Tensor) StdMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(2 * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
lib.AtgStdMean(ctensorPtr0, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// StdMeanCorrection wraps lib.AtgStdMeanCorrection (two results).
// `correction` is an optional value encoded as a slice: empty means null.
func(ts *Tensor) StdMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(2 * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgStdMeanCorrection(ctensorPtr0, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// StdMeanCorrectionOut writes into the caller-provided out0/out1 tensors.
func(ts *Tensor) StdMeanCorrectionOut(out0 *Tensor, out1 *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(2 * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgStdMeanCorrectionOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// StdMeanDim wraps lib.AtgStdMeanDim (two results) over the given dims.
func(ts *Tensor) StdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(2 * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
dimLen := len(dim)
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgStdMeanDim(ctensorPtr0, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------

// StdOut writes into the caller-provided out tensor.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) StdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Stft wraps lib.AtgStft. hopLength and winLength are optional values
// encoded as slices (empty means null). `window` must be non-nil:
// window.ctensor is dereferenced unconditionally.
func(ts *Tensor) Stft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var chopLengthVal int64 = 0
var chopLengthNull int = 1
if len(hopLength) > 0 {
chopLengthVal = hopLength[0]
chopLengthNull = 0
}
var cwinLengthVal int64 = 0
var cwinLengthNull int = 1
if len(winLength) > 0 {
cwinLengthVal = winLength[0]
cwinLengthNull = 0
}
cnormalized := int32(0)
if normalized { cnormalized = int32(1) }
conesided := int32(0)
if onesided { conesided = int32(1) }
creturnComplex := int32(0)
if returnComplex { creturnComplex = int32(1) }
lib.AtgStft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, cnormalized, conesided, creturnComplex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// StftCenter is Stft with explicit center/padMode arguments; padMode is
// passed through to the C layer as-is.
func(ts *Tensor) StftCenter(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, padMode string, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var chopLengthVal int64 = 0
var chopLengthNull int = 1
if len(hopLength) > 0 {
chopLengthVal = hopLength[0]
chopLengthNull = 0
}
var cwinLengthVal int64 = 0
var cwinLengthNull int = 1
if len(winLength) > 0 {
cwinLengthVal = winLength[0]
cwinLengthNull = 0
}
ccenter := int32(0)
if center { ccenter = int32(1) }
cnormalized := int32(0)
if normalized { cnormalized = int32(1) }
conesided := int32(0)
if onesided { conesided = int32(1) }
creturnComplex := int32(0)
if returnComplex { creturnComplex = int32(1) }
lib.AtgStftCenter(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, padMode, cnormalized, conesided, creturnComplex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Sub wraps lib.AtgSub (element-wise tensor subtraction).
//
// NOTE(review): throughout this family the result slot was C.malloc(0);
// the C side writes a pointer-sized handle through it (out-of-bounds, UB)
// and it leaked. Allocate one slot and free it after copying the handle
// out.
func(ts *Tensor) Sub(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSub(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Sub_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) Sub_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSub_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// SubOut writes into the caller-provided out tensor.
func(ts *Tensor) SubOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SubScalar subtracts a scalar via lib.AtgSubScalar.
func(ts *Tensor) SubScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SubScalar_ is the in-place scalar variant.
func(ts *Tensor) SubScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// SubScalarOut writes into the caller-provided out tensor.
func(ts *Tensor) SubScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Subtract wraps lib.AtgSubtract.
func(ts *Tensor) Subtract(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubtract(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Subtract_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) Subtract_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubtract_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// SubtractOut writes into the caller-provided out tensor.
func(ts *Tensor) SubtractOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubtractOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SubtractScalar subtracts a scalar via lib.AtgSubtractScalar.
func(ts *Tensor) SubtractScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubtractScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SubtractScalar_ is the in-place scalar variant.
func(ts *Tensor) SubtractScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSubtractScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// Sum wraps lib.AtgSum, converting the result to dtype.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) Sum(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSum(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SumDimIntlist wraps lib.AtgSumDimIntlist over the given dimensions.
func(ts *Tensor) SumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgSumDimIntlist(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SumIntlistOut writes into the caller-provided out tensor.
func(ts *Tensor) SumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimLen := len(dim)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgSumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SumOut writes into the caller-provided out tensor.
func(ts *Tensor) SumOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSumOut(ptr, out.ctensor, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// SumToSize wraps lib.AtgSumToSize with the target size list.
func(ts *Tensor) SumToSize(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
sizeLen := len(size)
lib.AtgSumToSize(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// Svd wraps lib.AtgSvd, which fills three consecutive tensor-handle slots
// starting at ctensorPtr0.
//
// NOTE(review): these three-tensor wrappers allocated the result array
// with C.malloc(0) and then had the C side write THREE pointer-sized
// handles into it — an out-of-bounds write (undefined behavior) — and the
// allocation was never freed. Allocate three slots and free them after the
// handles have been copied out.
func(ts *Tensor) Svd(some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(3 * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some { csome = int32(1) }
ccomputeUv := int32(0)
if computeUv { ccomputeUv = int32(1) }
lib.AtgSvd(ctensorPtr0, ts.ctensor, csome, ccomputeUv)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------

// SvdU writes into the caller-provided u/s/v tensors (three results).
func(ts *Tensor) SvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(3 * C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ctensorPtr0))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some { csome = int32(1) }
ccomputeUv := int32(0)
if computeUv { ccomputeUv = int32(1) }
lib.AtgSvdU(ctensorPtr0, u.ctensor, s.ctensor, v.ctensor, ts.ctensor, csome, ccomputeUv)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------

// Swapaxes wraps lib.AtgSwapaxes.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) Swapaxes(axis0 int64, axis1 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSwapaxes(ptr, ts.ctensor, axis0, axis1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Swapaxes_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) Swapaxes_(axis0 int64, axis1 int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSwapaxes_(ptr, ts.ctensor, axis0, axis1)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// Swapdims wraps lib.AtgSwapdims.
func(ts *Tensor) Swapdims(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSwapdims(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Swapdims_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) Swapdims_(dim0 int64, dim1 int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgSwapdims_(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// T wraps lib.AtgT.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) T(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgT(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// T_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) T_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgT_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// TCopy wraps lib.AtgTCopy.
func(ts *Tensor) TCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// TCopyOut writes into the caller-provided out tensor.
func(ts *Tensor) TCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Take wraps lib.AtgTake with an index tensor.
func(ts *Tensor) Take(index *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTake(ptr, ts.ctensor, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// TakeAlongDim wraps lib.AtgTakeAlongDim. `dim` is an optional value
// encoded as a slice: empty means null (val/null flag pair on the C side).
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) TakeAlongDim(indices *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgTakeAlongDim(ptr, ts.ctensor, indices.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// TakeAlongDimOut writes into the caller-provided out tensor.
func(ts *Tensor) TakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgTakeAlongDimOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// TakeOut writes into the caller-provided out tensor.
func(ts *Tensor) TakeOut(out *Tensor, index *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Tan wraps lib.AtgTan.
//
// NOTE(review): throughout this family the result slot was C.malloc(0);
// the C side writes a pointer-sized handle through it (out-of-bounds, UB)
// and it leaked. Allocate one slot and free it after copying the handle
// out.
func(ts *Tensor) Tan(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Tan_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) Tan_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTan_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// TanOut writes into the caller-provided out tensor.
func(ts *Tensor) TanOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTanOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Tanh wraps lib.AtgTanh.
func(ts *Tensor) Tanh(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Tanh_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) Tanh_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// TanhBackward wraps lib.AtgTanhBackward (free function: no receiver).
func TanhBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// TanhBackwardGradInput writes into the caller-provided gradInput tensor.
func TanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// TanhOut writes into the caller-provided out tensor.
func(ts *Tensor) TanhOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Tensordot wraps lib.AtgTensordot over the given contraction dims.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) Tensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimsSelfLen := len(dimsSelf)
dimsOtherLen := len(dimsOther)
lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, dimsSelfLen, dimsOther, dimsOtherLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// TensordotOut writes into the caller-provided out tensor.
func(ts *Tensor) TensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimsSelfLen := len(dimsSelf)
dimsOtherLen := len(dimsOther)
lib.AtgTensordotOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dimsSelf, dimsSelfLen, dimsOther, dimsOtherLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Threshold wraps lib.AtgThreshold with scalar threshold/value arguments.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) Threshold(threshold *Scalar, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Threshold_ is the in-place variant: on success ts.ctensor is replaced.
func(ts *Tensor) Threshold_(threshold *Scalar, value *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------

// ThresholdBackward wraps lib.AtgThresholdBackward; note the receiver is
// the forward input, passed after gradOutput.
func(ts *Tensor) ThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ThresholdBackwardGradInput writes into the caller-provided gradInput.
func(ts *Tensor) ThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgThresholdBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ThresholdOut writes into the caller-provided out tensor.
func(ts *Tensor) ThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// Tile wraps lib.AtgTile with the repetition counts in dims.
//
// NOTE(review): the result slot was C.malloc(0); the C side writes a
// pointer-sized handle through it (out-of-bounds, UB) and it leaked.
// Allocate one slot and free it after copying the handle out.
func(ts *Tensor) Tile(dims []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
dimsLen := len(dims)
lib.AtgTile(ptr, ts.ctensor, dims, dimsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// To wraps lib.AtgTo, moving the tensor to the given device.
func(ts *Tensor) To(device gotch.Device, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgTo(ptr, ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ToDense wraps lib.AtgToDense with the target dtype.
func(ts *Tensor) ToDense(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgToDense(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------

// ToDenseBackward wraps lib.AtgToDenseBackward (free function).
func ToDenseBackward(grad *Tensor, input *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))))
defer C.free(unsafe.Pointer(ptr))
lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
ccopy := int32(0)
if copy { ccopy = int32(1) }
lib.AtgToDevice(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
ccopy := int32(0)
if copy { ccopy = int32(1) }
lib.AtgToDtype(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
ccopy := int32(0)
if copy { ccopy = int32(1) }
lib.AtgToDtypeLayout(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToMkldnn(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToMkldnn(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func ToMkldnnBackward(grad *Tensor, input *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToMkldnnOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToMkldnnOut(ptr, out.ctensor, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToOther(other *Tensor, nonBlocking bool, copy bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking { cnonBlocking = int32(1) }
ccopy := int32(0)
if copy { ccopy = int32(1) }
lib.AtgToOther(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToPaddedTensor(padding float64, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgToPaddedTensor(ptr, ts.ctensor, padding, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToPaddedTensorOut(out *Tensor, padding float64, outputSize []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
lib.AtgToPaddedTensorOut(ptr, out.ctensor, ts.ctensor, padding, outputSize, outputSizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparse(layout Layout, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
blocksizeLen := len(blocksize)
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparse(ptr, ts.ctensor, int8(layout), blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseBsc(blocksize []int64, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
blocksizeLen := len(blocksize)
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseBsc(ptr, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseBscOut(out *Tensor, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
blocksizeLen := len(blocksize)
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseBscOut(ptr, out.ctensor, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseBsr(blocksize []int64, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
blocksizeLen := len(blocksize)
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseBsr(ptr, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseBsrOut(out *Tensor, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
blocksizeLen := len(blocksize)
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseBsrOut(ptr, out.ctensor, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseCsc(denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseCsc(ptr, ts.ctensor, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseCscOut(out *Tensor, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseCscOut(ptr, out.ctensor, ts.ctensor, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseCsr(denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseCsr(ptr, ts.ctensor, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseCsrOut(out *Tensor, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseCsrOut(ptr, out.ctensor, ts.ctensor, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseOut(out *Tensor, layout Layout, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
blocksizeLen := len(blocksize)
var cdenseDimVal int64 = 0
var cdenseDimNull int = 1
if len(denseDim) > 0 {
cdenseDimVal = denseDim[0]
cdenseDimNull = 0
}
lib.AtgToSparseOut(ptr, out.ctensor, ts.ctensor, int8(layout), blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseSparseDim(sparseDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToSparseSparseDim(ptr, ts.ctensor, sparseDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) ToSparseSparseDimOut(out *Tensor, sparseDim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToSparseSparseDimOut(ptr, out.ctensor, ts.ctensor, sparseDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) Topk(k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
clargest := int32(0)
if largest { clargest = int32(1) }
csorted := int32(0)
if sorted { csorted = int32(1) }
lib.AtgTopk(ctensorPtr0, ts.ctensor, k, dim, clargest, csorted)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) TopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
clargest := int32(0)
if largest { clargest = int32(1) }
csorted := int32(0)
if sorted { csorted = int32(1) }
lib.AtgTopkValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, clargest, csorted)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Totype(scalarType gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Trace(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrace(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func TraceBackward(grad *Tensor, sizes []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizesLen := len(sizes)
lib.AtgTraceBackward(ptr, grad.ctensor, sizes, sizesLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TraceOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTraceOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Transpose(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Transpose_(dim0 int64, dim1 int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TransposeCopy(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTransposeCopy(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TransposeCopyIntOut(out *Tensor, dim0 int64, dim1 int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTransposeCopyIntOut(ptr, out.ctensor, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func Trapezoid(y *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapezoid(ptr, y.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func TrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapezoidX(ptr, y.ctensor, x.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func Trapz(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func TrapzDx(y *Tensor, dx float64, dim int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapzDx(ptr, y.ctensor, dx, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) TriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper { cupper = int32(1) }
ctranspose := int32(0)
if transpose { ctranspose = int32(1) }
cunitriangular := int32(0)
if unitriangular { cunitriangular = int32(1) }
lib.AtgTriangularSolve(ctensorPtr0, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) TriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper { cupper = int32(1) }
ctranspose := int32(0)
if transpose { ctranspose = int32(1) }
cunitriangular := int32(0)
if unitriangular { cunitriangular = int32(1) }
lib.AtgTriangularSolveX(ctensorPtr0, x.ctensor, m.ctensor, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Tril(diagonal int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTril(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Tril_(diagonal int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTril_(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func TrilIndicesOut(out *Tensor, row int64, col int64, offset int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrilIndicesOut(ptr, out.ctensor, row, col, offset)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TrilOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func TripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cswap := int32(0)
if swap { cswap = int32(1) }
lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Triu(diagonal int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriu(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Triu_(diagonal int64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriu_(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func TriuIndicesOut(out *Tensor, row int64, col int64, offset int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriuIndicesOut(ptr, out.ctensor, row, col, offset)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TriuOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TrueDivide(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TrueDivide_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TrueDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TrueDivideScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivideScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TrueDivideScalar_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivideScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Trunc(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrunc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Trunc_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrunc_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TruncOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) TypeAs(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Unflatten(dim int64, sizes []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizesLen := len(sizes)
lib.AtgUnflatten(ptr, ts.ctensor, dim, sizes, sizesLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func UnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputSizesLen := len(inputSizes)
lib.AtgUnfoldBackward(ptr, gradIn.ctensor, inputSizes, inputSizesLen, dim, size, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func UnfoldBackwardOut(out *Tensor, gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
inputSizesLen := len(inputSizes)
lib.AtgUnfoldBackwardOut(ptr, out.ctensor, gradIn.ctensor, inputSizes, inputSizesLen, dim, size, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) UnfoldCopy(dimension int64, size int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnfoldCopy(ptr, ts.ctensor, dimension, size, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) UnfoldCopyOut(out *Tensor, dimension int64, size int64, step int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnfoldCopyOut(ptr, out.ctensor, ts.ctensor, dimension, size, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Uniform(from float64, to float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUniform(ptr, ts.ctensor, from, to)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) Uniform_(from float64, to float64)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUniform_(ptr, ts.ctensor, from, to)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
func(ts *Tensor) UniformOut(out *Tensor, from float64, to float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUniformOut(ptr, out.ctensor, ts.ctensor, from, to)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
func(ts *Tensor) UniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgUniqueConsecutive(ctensorPtr0, ts.ctensor, creturnInverse, creturnCounts, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// UniqueConsecutiveOut is the out-variant of UniqueConsecutive: out0/out1/out2
// are forwarded as destination tensors to C `AtgUniqueConsecutiveOut`, and the
// three handles written back are wrapped as new Tensors. dim is an optional
// int64 ((value, isNull) encoding; empty slice = null). If del is true, ts is
// dropped on return.
func(ts *Tensor) UniqueConsecutiveOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, returnInverse bool, returnCounts bool, dim []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three contiguous result slots; NOTE(review): backed by C.malloc(0)
// (file-wide generator pattern) — size looks suspect, confirm in libtch.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgUniqueConsecutiveOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, creturnInverse, creturnCounts, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// UniqueDim calls C `AtgUniqueDim` along the given dim and returns the three
// tensors written back — presumably (output, inverse_indices, counts) per
// libtorch's `unique_dim`; confirm upstream. Booleans are marshalled to
// int32 0/1. If del is true, ts is dropped on return.
func(ts *Tensor) UniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three contiguous result slots; NOTE(review): backed by C.malloc(0)
// (file-wide generator pattern) — confirm allocation size in libtch.
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csorted := int32(0)
if sorted { csorted = int32(1) }
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
lib.AtgUniqueDim(ctensorPtr0, ts.ctensor, dim, csorted, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// UniqueDimConsecutive calls C `AtgUniqueDimConsecutive` along dim and returns
// the three tensors written back (presumably output/inverse/counts — confirm
// upstream). If del is true, ts is dropped on return.
func(ts *Tensor) UniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three contiguous result slots backed by C.malloc(0) (generator pattern;
// see NOTE on sibling functions).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
lib.AtgUniqueDimConsecutive(ctensorPtr0, ts.ctensor, dim, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// UniqueDimConsecutiveOut is the out-variant of UniqueDimConsecutive:
// out0/out1/out2 are forwarded as destination tensors to the C call, and the
// three handles written back are wrapped as new Tensors. If del is true, ts
// is dropped on return.
func(ts *Tensor) UniqueDimConsecutiveOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, dim int64, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three contiguous result slots backed by C.malloc(0) (generator pattern).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
lib.AtgUniqueDimConsecutiveOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, dim, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// UniqueDimOut is the out-variant of UniqueDim: out0/out1/out2 are forwarded
// as destination tensors to C `AtgUniqueDimOut`, and the three handles
// written back are wrapped as new Tensors. If del is true, ts is dropped on
// return.
func(ts *Tensor) UniqueDimOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del { defer ts.MustDrop() }
// Three contiguous result slots backed by C.malloc(0) (generator pattern).
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csorted := int32(0)
if sorted { csorted = int32(1) }
creturnInverse := int32(0)
if returnInverse { creturnInverse = int32(1) }
creturnCounts := int32(0)
if returnCounts { creturnCounts = int32(1) }
lib.AtgUniqueDimOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, dim, csorted, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
retVal2 = &Tensor{ctensor: *ctensorPtr2}
return retVal0, retVal1, retVal2, err
}
// func.returns = `fixed 1`:
// --------------------------
// Unsqueeze calls C `AtgUnsqueeze` with dim and wraps the handle written back
// in a new Tensor. If del is true, ts is dropped when this function returns.
func(ts *Tensor) Unsqueeze(dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnsqueeze(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Unsqueeze_ is the in-place variant: calls C `AtgUnsqueeze_` and, on
// success, replaces ts's C handle with the one written back. On a C-side
// error ts is left unchanged.
func(ts *Tensor) Unsqueeze_(dim int64)(err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnsqueeze_(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// UnsqueezeCopy calls C `AtgUnsqueezeCopy` with dim and wraps the handle
// written back in a new Tensor. If del is true, ts is dropped on return.
func(ts *Tensor) UnsqueezeCopy(dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnsqueezeCopy(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UnsqueezeCopyOut is the out-variant of UnsqueezeCopy: out is forwarded as
// the destination tensor to C `AtgUnsqueezeCopyOut`, and the handle written
// back is wrapped in a new Tensor. If del is true, ts is dropped on return.
func(ts *Tensor) UnsqueezeCopyOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnsqueezeCopyOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBicubic2d calls C `AtgUpsampleBicubic2d` on ts and wraps the handle
// written back in a new Tensor. outputSize is passed with its length;
// scalesH/scalesW are optional doubles encoded as (value, isNull) pairs — an
// empty slice means "null" on the C side. If del is true, ts is dropped on
// return.
func(ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
// Optional scalesH/scalesW: (value, isNull) encoding.
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBicubic2dBackward calls C `AtgUpsampleBicubic2dBackward` with
// gradOutput and the output/input size lists (each passed with its length),
// wrapping the handle written back in a new Tensor. scalesH/scalesW are
// optional doubles ((value, isNull) encoding; empty slice = null).
func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBicubic2dBackwardGradInput is the grad_input out-variant of the
// backward pass: gradInput is forwarded as the destination tensor to
// C `AtgUpsampleBicubic2dBackwardGradInput`, and the handle written back is
// wrapped in a new Tensor. scalesH/scalesW are optional doubles
// ((value, isNull) encoding; empty slice = null).
func UpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBicubic2dOut is the out-variant of UpsampleBicubic2d: out is
// forwarded as the destination tensor to the C call. scalesH/scalesW are
// optional doubles ((value, isNull) encoding; empty slice = null). If del is
// true, ts is dropped on return.
func(ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBicubic2dVec calls C `AtgUpsampleBicubic2dVec` with optional
// outputSize and scaleFactors lists (each passed with its length; presumably
// an empty/nil slice means "not given" on the C side — confirm in libtch) and
// wraps the handle written back in a new Tensor.
func UpsampleBicubic2dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
scaleFactorsLen := len(scaleFactors)
lib.AtgUpsampleBicubic2dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBilinear2d calls C `AtgUpsampleBilinear2d` on ts and wraps the
// handle written back in a new Tensor. scalesH/scalesW are optional doubles
// encoded as (value, isNull) pairs; an empty slice means "null". If del is
// true, ts is dropped on return.
func(ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBilinear2dBackward calls C `AtgUpsampleBilinear2dBackward` with
// gradOutput and the output/input size lists, wrapping the handle written
// back in a new Tensor. scalesH/scalesW are optional doubles
// ((value, isNull) encoding; empty slice = null).
func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBilinear2dBackwardGradInput is the grad_input out-variant of the
// backward pass: gradInput is forwarded as the destination tensor to the C
// call, and the handle written back is wrapped in a new Tensor.
// scalesH/scalesW are optional doubles ((value, isNull) encoding).
func UpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBilinear2dOut is the out-variant of UpsampleBilinear2d: out is
// forwarded as the destination tensor to the C call. scalesH/scalesW are
// optional doubles ((value, isNull) encoding; empty slice = null). If del is
// true, ts is dropped on return.
func(ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleBilinear2dVec calls C `AtgUpsampleBilinear2dVec` with optional
// outputSize and scaleFactors lists (passed with their lengths) and wraps the
// handle written back in a new Tensor.
func UpsampleBilinear2dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
scaleFactorsLen := len(scaleFactors)
lib.AtgUpsampleBilinear2dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleLinear1d calls C `AtgUpsampleLinear1d` on ts and wraps the handle
// written back in a new Tensor. scales is an optional double encoded as a
// (value, isNull) pair; an empty slice means "null". If del is true, ts is
// dropped on return.
func(ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleLinear1dBackward calls C `AtgUpsampleLinear1dBackward` with
// gradOutput and the output/input size lists, wrapping the handle written
// back in a new Tensor. scales is an optional double ((value, isNull)
// encoding; empty slice = null).
func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleLinear1dBackwardGradInput is the grad_input out-variant of the
// backward pass: gradInput is forwarded as the destination tensor to the C
// call, and the handle written back is wrapped in a new Tensor. scales is an
// optional double ((value, isNull) encoding; empty slice = null).
func UpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleLinear1dOut is the out-variant of UpsampleLinear1d: out is
// forwarded as the destination tensor to the C call. scales is an optional
// double ((value, isNull) encoding; empty slice = null). If del is true, ts
// is dropped on return.
func(ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleLinear1dVec calls C `AtgUpsampleLinear1dVec` with optional
// outputSize and scaleFactors lists (passed with their lengths) and wraps the
// handle written back in a new Tensor.
func UpsampleLinear1dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
scaleFactorsLen := len(scaleFactors)
lib.AtgUpsampleLinear1dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest1d calls C `AtgUpsampleNearest1d` on ts and wraps the handle
// written back in a new Tensor. scales is an optional double encoded as a
// (value, isNull) pair; an empty slice means "null". If del is true, ts is
// dropped on return.
func(ts *Tensor) UpsampleNearest1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest1dBackward calls C `AtgUpsampleNearest1dBackward` with
// gradOutput and the output/input size lists, wrapping the handle written
// back in a new Tensor. scales is an optional double ((value, isNull)
// encoding; empty slice = null).
func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest1dBackwardGradInput is the grad_input out-variant of the
// backward pass: gradInput is forwarded as the destination tensor to the C
// call, and the handle written back is wrapped in a new Tensor. scales is an
// optional double ((value, isNull) encoding; empty slice = null).
func UpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest1dOut is the out-variant of UpsampleNearest1d: out is
// forwarded as the destination tensor to the C call. scales is an optional
// double ((value, isNull) encoding; empty slice = null). If del is true, ts
// is dropped on return.
func(ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest1dVec calls C `AtgUpsampleNearest1dVec` with optional
// outputSize and scaleFactors lists (passed with their lengths) and wraps the
// handle written back in a new Tensor.
func UpsampleNearest1dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
scaleFactorsLen := len(scaleFactors)
lib.AtgUpsampleNearest1dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest2d calls C `AtgUpsampleNearest2d` on ts and wraps the handle
// written back in a new Tensor. scalesH/scalesW are optional doubles encoded
// as (value, isNull) pairs; an empty slice means "null". If del is true, ts
// is dropped on return.
func(ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest2dBackward calls C `AtgUpsampleNearest2dBackward` with
// gradOutput and the output/input size lists, wrapping the handle written
// back in a new Tensor. scalesH/scalesW are optional doubles
// ((value, isNull) encoding; empty slice = null).
func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest2dBackwardGradInput is the grad_input out-variant of the
// backward pass: gradInput is forwarded as the destination tensor to the C
// call, and the handle written back is wrapped in a new Tensor.
// scalesH/scalesW are optional doubles ((value, isNull) encoding).
func UpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest2dOut is the out-variant of UpsampleNearest2d: out is
// forwarded as the destination tensor to the C call. scalesH/scalesW are
// optional doubles ((value, isNull) encoding; empty slice = null). If del is
// true, ts is dropped on return.
func(ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest2dVec calls C `AtgUpsampleNearest2dVec` with optional
// outputSize and scaleFactors lists (passed with their lengths) and wraps the
// handle written back in a new Tensor.
func UpsampleNearest2dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
scaleFactorsLen := len(scaleFactors)
lib.AtgUpsampleNearest2dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest3d calls C `AtgUpsampleNearest3d` on ts and wraps the handle
// written back in a new Tensor. scalesD/scalesH/scalesW are optional doubles
// encoded as (value, isNull) pairs; an empty slice means "null". If del is
// true, ts is dropped on return.
func(ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest3dBackward calls C `AtgUpsampleNearest3dBackward` with
// gradOutput and the output/input size lists, wrapping the handle written
// back in a new Tensor. scalesD/scalesH/scalesW are optional doubles
// ((value, isNull) encoding; empty slice = null).
func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest3dBackwardGradInput is the grad_input out-variant of the
// backward pass: gradInput is forwarded as the destination tensor to the C
// call, and the handle written back is wrapped in a new Tensor.
// scalesD/scalesH/scalesW are optional doubles ((value, isNull) encoding).
func UpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest3dOut is the out-variant of UpsampleNearest3d: out is
// forwarded as the destination tensor to the C call. scalesD/scalesH/scalesW
// are optional doubles ((value, isNull) encoding; empty slice = null). If
// del is true, ts is dropped on return.
func(ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleNearest3dVec calls C `AtgUpsampleNearest3dVec` with optional
// outputSize and scaleFactors lists (passed with their lengths) and wraps the
// handle written back in a new Tensor.
func UpsampleNearest3dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor, err error) {
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
scaleFactorsLen := len(scaleFactors)
lib.AtgUpsampleNearest3dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleTrilinear3d calls C `AtgUpsampleTrilinear3d` on ts and wraps the
// handle written back in a new Tensor. scalesD/scalesH/scalesW are optional
// doubles encoded as (value, isNull) pairs; an empty slice means "null". If
// del is true, ts is dropped on return.
func(ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
// Result slot written by the C call (C.malloc(0) generator pattern).
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleTrilinear3dBackward wraps lib.AtgUpsampleTrilinear3dBackward,
// computing the gradient w.r.t. the input from gradOutput. Empty scales*
// slices mean "null" (unset) on the C side.
func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleTrilinear3dBackwardGradInput wraps
// lib.AtgUpsampleTrilinear3dBackwardGradInput, the variant writing into the
// caller-supplied gradInput tensor. Empty scales* slices mean "null" on the C side.
func UpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
inputSizeLen := len(inputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleTrilinear3dOut wraps lib.AtgUpsampleTrilinear3dOut, the variant
// writing into the caller-supplied out tensor. Empty scales* slices mean
// "null" on the C side. If del is true the receiver is dropped after the call.
func(ts *Tensor) UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// UpsampleTrilinear3dVec wraps lib.AtgUpsampleTrilinear3dVec, the overload
// taking explicit output sizes and per-dimension scale factors.
func UpsampleTrilinear3dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
outputSizeLen := len(outputSize)
calignCorners := int32(0)
if alignCorners { calignCorners = int32(1) }
scaleFactorsLen := len(scaleFactors)
lib.AtgUpsampleTrilinear3dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ValueSelectingReductionBackward wraps lib.AtgValueSelectingReductionBackward.
// Returns a new tensor or the error reported by TorchErr.
func ValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizesLen := len(sizes)
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgValueSelectingReductionBackward(ptr, grad.ctensor, dim, indices.ctensor, sizes, sizesLen, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Values wraps lib.AtgValues on the receiver. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) Values(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgValues(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ValuesCopy wraps lib.AtgValuesCopy on the receiver. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) ValuesCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgValuesCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ValuesCopyOut wraps lib.AtgValuesCopyOut, writing into the caller-supplied
// out tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) ValuesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgValuesCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Vander wraps lib.AtgVander. n is an optional scalar: an empty slice passes
// "null" to C, otherwise element 0 is used.
func Vander(x *Tensor, n []int64, increasing bool)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
cincreasing := int32(0)
if increasing { cincreasing = int32(1) }
lib.AtgVander(ptr, x.ctensor, cnVal, cnNull, cincreasing)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Var wraps lib.AtgVar on the receiver. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) Var(unbiased bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
lib.AtgVar(ptr, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// VarCorrection wraps lib.AtgVarCorrection. correction is an optional scalar:
// an empty slice passes "null" to C, otherwise element 0 is used. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) VarCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgVarCorrection(ptr, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// VarCorrectionOut wraps lib.AtgVarCorrectionOut, writing into the
// caller-supplied out tensor. correction is an optional scalar (empty slice =
// null). If del is true the receiver is dropped after the call.
func(ts *Tensor) VarCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgVarCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// VarDim wraps lib.AtgVarDim on the receiver. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) VarDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgVarDim(ptr, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// VarMean wraps lib.AtgVarMean, which returns two tensors. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) VarMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// ctensorPtr1 is one Ctensor slot past ctensorPtr0; the C call writes both
// result handles consecutively starting at ctensorPtr0 (generated pattern).
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
lib.AtgVarMean(ctensorPtr0, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// VarMeanCorrection wraps lib.AtgVarMeanCorrection, which returns two tensors.
// correction is an optional scalar (empty slice = null). If del is true the
// receiver is dropped after the call.
func(ts *Tensor) VarMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Second result handle is written one Ctensor slot past the first (generated pattern).
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgVarMeanCorrection(ctensorPtr0, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// VarMeanCorrectionOut wraps lib.AtgVarMeanCorrectionOut, writing into the
// caller-supplied out0/out1 tensors and returning two tensors. correction is
// an optional scalar (empty slice = null). If del is true the receiver is
// dropped after the call.
func(ts *Tensor) VarMeanCorrectionOut(out0 *Tensor, out1 *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Second result handle is written one Ctensor slot past the first (generated pattern).
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
dimLen := len(dim)
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgVarMeanCorrectionOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed ntensors`:
// ---------------------------------
// VarMeanDim wraps lib.AtgVarMeanDim, which returns two tensors. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) VarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) {
if del { defer ts.MustDrop() }
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
// Second result handle is written one Ctensor slot past the first (generated pattern).
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
dimLen := len(dim)
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgVarMeanDim(ctensorPtr0, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = &Tensor{ctensor: *ctensorPtr0}
retVal1 = &Tensor{ctensor: *ctensorPtr1}
return retVal0, retVal1, err
}
// func.returns = `fixed 1`:
// --------------------------
// VarOut wraps lib.AtgVarOut, writing into the caller-supplied out tensor. If
// del is true the receiver is dropped after the call.
func(ts *Tensor) VarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
dimLen := len(dim)
cunbiased := int32(0)
if unbiased { cunbiased = int32(1) }
ckeepdim := int32(0)
if keepdim { ckeepdim = int32(1) }
lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Vdot wraps lib.AtgVdot on the receiver and other. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) Vdot(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgVdot(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// VdotOut wraps lib.AtgVdotOut, writing into the caller-supplied out tensor.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) VdotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgVdotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// View wraps lib.AtgView, reshaping to size. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) View(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgView(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewAs wraps lib.AtgViewAs on the receiver and other. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) ViewAs(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewAsComplex wraps lib.AtgViewAsComplex on the receiver. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) ViewAsComplex(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsComplex(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewAsComplexCopy wraps lib.AtgViewAsComplexCopy on the receiver. If del is
// true the receiver is dropped after the call.
func(ts *Tensor) ViewAsComplexCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsComplexCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewAsComplexCopyOut wraps lib.AtgViewAsComplexCopyOut, writing into the
// caller-supplied out tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) ViewAsComplexCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsComplexCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewAsReal wraps lib.AtgViewAsReal on the receiver. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) ViewAsReal(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsReal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewAsRealCopy wraps lib.AtgViewAsRealCopy on the receiver. If del is true
// the receiver is dropped after the call.
func(ts *Tensor) ViewAsRealCopy(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsRealCopy(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewAsRealCopyOut wraps lib.AtgViewAsRealCopyOut, writing into the
// caller-supplied out tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) ViewAsRealCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsRealCopyOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewCopy wraps lib.AtgViewCopy with the given size. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) ViewCopy(size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgViewCopy(ptr, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewCopyDtype wraps lib.AtgViewCopyDtype; dtype is marshalled via
// dtype.CInt(). If del is true the receiver is dropped after the call.
func(ts *Tensor) ViewCopyDtype(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewCopyDtype(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewCopyDtypeOut wraps lib.AtgViewCopyDtypeOut, writing into the
// caller-supplied out tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) ViewCopyDtypeOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewCopyDtypeOut(ptr, out.ctensor, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewCopyOut wraps lib.AtgViewCopyOut, writing into the caller-supplied out
// tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) ViewCopyOut(out *Tensor, size []int64, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgViewCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ViewDtype wraps lib.AtgViewDtype; dtype is marshalled via dtype.CInt(). If
// del is true the receiver is dropped after the call.
func(ts *Tensor) ViewDtype(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewDtype(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Vstack wraps lib.AtgVstack over a slice of tensors; their C handles are
// collected into a contiguous slice before the call.
func Vstack(tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgVstack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// VstackOut wraps lib.AtgVstackOut, writing into the caller-supplied out
// tensor; the input tensors' C handles are collected before the call.
func VstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
lib.AtgVstackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// WhereScalar wraps lib.AtgWhereScalar: both branch values are scalars,
// selected elementwise by condition.
func WhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereScalar(ptr, condition.ctensor, selfScalar.cscalar, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// WhereScalarother wraps lib.AtgWhereScalarother: the receiver is the "self"
// branch and other is a scalar. If del is true the receiver is dropped after the call.
func(ts *Tensor) WhereScalarother(condition *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereScalarother(ptr, condition.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// WhereScalarself wraps lib.AtgWhereScalarself: the "self" branch is a scalar
// and other is a tensor.
func WhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereScalarself(ptr, condition.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// WhereSelf wraps lib.AtgWhereSelf: elementwise select between the receiver
// and other by condition. If del is true the receiver is dropped after the call.
func(ts *Tensor) WhereSelf(condition *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereSelf(ptr, condition.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// WhereSelfOut wraps lib.AtgWhereSelfOut, writing into the caller-supplied
// out tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) WhereSelfOut(out *Tensor, condition *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereSelfOut(ptr, out.ctensor, condition.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Xlogy wraps lib.AtgXlogy on the receiver and other. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) Xlogy(other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogy(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Xlogy_ wraps lib.AtgXlogy_, the in-place variant: on success the receiver's
// C handle is replaced with the result handle.
func(ts *Tensor) Xlogy_(other *Tensor)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogy_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// XlogyOutscalarOther wraps lib.AtgXlogyOutscalarOther, writing into the
// caller-supplied out tensor with a scalar other. If del is true the receiver
// is dropped after the call.
func(ts *Tensor) XlogyOutscalarOther(out *Tensor, other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyOutscalarOther(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// XlogyOutscalarSelf wraps lib.AtgXlogyOutscalarSelf, writing into the
// caller-supplied out tensor with a scalar "self" operand.
func XlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyOutscalarSelf(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// XlogyOuttensor wraps lib.AtgXlogyOuttensor, writing into the caller-supplied
// out tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) XlogyOuttensor(out *Tensor, other *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyOuttensor(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// XlogyScalarOther wraps lib.AtgXlogyScalarOther with a scalar other. If del
// is true the receiver is dropped after the call.
func(ts *Tensor) XlogyScalarOther(other *Scalar, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyScalarOther(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// XlogyScalarOther_ wraps lib.AtgXlogyScalarOther_, the in-place variant: on
// success the receiver's C handle is replaced with the result handle.
func(ts *Tensor) XlogyScalarOther_(other *Scalar)(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyScalarOther_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// XlogyScalarSelf wraps lib.AtgXlogyScalarSelf with a scalar "self" operand
// and a tensor other.
func XlogyScalarSelf(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyScalarSelf(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Zero wraps lib.AtgZero on the receiver. If del is true the receiver is
// dropped after the call.
func(ts *Tensor) Zero(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZero(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Zero_ wraps lib.AtgZero_, the in-place variant: on success the receiver's C
// handle is replaced with the result handle.
func(ts *Tensor) Zero_()(err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZero_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
// func.returns = `fixed 1`:
// --------------------------
// ZeroOut wraps lib.AtgZeroOut, writing into the caller-supplied out tensor.
// If del is true the receiver is dropped after the call.
func(ts *Tensor) ZeroOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZeroOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// Zeros wraps lib.AtgZeros, creating a tensor of the given size with the
// requested dtype and device (both marshalled via CInt()).
func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgZeros(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ZerosLike wraps lib.AtgZerosLike on the receiver. If del is true the
// receiver is dropped after the call.
func(ts *Tensor) ZerosLike(del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZerosLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ZerosLikeOut wraps lib.AtgZerosLikeOut, writing into the caller-supplied
// out tensor. If del is true the receiver is dropped after the call.
func(ts *Tensor) ZerosLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) {
if del { defer ts.MustDrop() }
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZerosLikeOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// func.returns = `fixed 1`:
// --------------------------
// ZerosOut wraps lib.AtgZerosOut, writing into the caller-supplied out tensor.
func ZerosOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
sizeLen := len(size)
lib.AtgZerosOut(ptr, out.ctensor, size, sizeLen)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = &Tensor{ctensor: *ptr}
return retVal, err
}
// End of implementing Tensor =================================