// gotch/ts/tensor-generated.go
package ts
// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
// #include "stdlib.h"
import "C"
import (
"unsafe"
"github.com/sugarme/gotch"
lib "github.com/sugarme/gotch/libtch"
)
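// Editorial note on the binding conventions used throughout this file
// (a readability summary, not generator output):
//
//   - Each wrapper allocates a C-side slot for the returned tensor handle,
//     invokes the corresponding libtch shim (lib.Atg*), and then checks
//     TorchErr(), which surfaces a C++ exception from libtorch as a Go error.
//   - On success the raw handle is wrapped with newTensor, tying the native
//     tensor's lifetime to the Go value.
//   - Methods whose names end in an underscore (e.g. __And_ below) are
//     in-place: they overwrite ts.ctensor and return only an error.
//
// All of these identifiers start with an underscore and are therefore
// unexported; the sketches in the comments below assume use from within
// package ts. A minimal sketch, assuming a tensor x and scalar s built
// elsewhere (for instance with OfSlice, whose availability here is an
// assumption of this note):
//
//	err := x.__And_(s) // in-place bitwise AND with scalar s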
func (ts *Tensor) __And_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__And_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __AndTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__AndTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Iand_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __IandTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IandTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Ilshift_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Ilshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __IlshiftTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IlshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Ior_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __IorTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Irshift_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __IrshiftTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IrshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Ixor_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __IxorTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__IxorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Lshift_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __LshiftTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__LshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Or_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Or_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __OrTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__OrTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Rshift_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __RshiftTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__RshiftTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __Xor_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) __XorTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg__XorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
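// The `del bool` parameter on the value-returning wrappers below implements
// a consume-the-receiver idiom: when del is true, ts.MustDrop() is deferred
// so the receiver's native memory is freed as soon as the call returns,
// which keeps chains of intermediate results from leaking. A hedged sketch:
//
//	// y consumes x; x must not be used afterwards.
//	y, err := x._AdaptiveAvgPool2d([]int64{1, 1}, true)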
func (ts *Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AddBatchDim(batchDim int64, level int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddBatchDim(ptr, ts.ctensor, batchDim, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AddRelu(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddRelu(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AddRelu_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddRelu_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) _AddReluOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddReluOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AddReluScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddReluScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AddReluScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AddReluScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
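// Wrappers with several tensor results (such as _Aminmax below) allocate a
// single slot and derive the remaining output pointers arithmetically: the
// libtch shim writes the N result handles contiguously starting at
// ctensorPtr0, and each ctensorPtrK is offset by K pointer widths
// (unsafe.Sizeof(ctensorPtr0)).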
func (ts *Tensor) _Aminmax(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_Aminmax(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _AminmaxDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.Atg_AminmaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
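// cgo cannot pass Go bools across the C boundary, so boolean options
// (keepdim above, the nonBlocking/enabled flags below) are lowered to an
// int32 0/1 first. The generated pattern is always:
//
//	cflag := int32(0)
//	if flag {
//		cflag = int32(1)
//	}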
func (ts *Tensor) _AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_AmpUpdateScale_(ptr, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) _AutocastToFullPrecision(cudaEnabled bool, cpuEnabled bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccudaEnabled := int32(0)
if cudaEnabled {
ccudaEnabled = int32(1)
}
ccpuEnabled := int32(0)
if cpuEnabled {
ccpuEnabled = int32(1)
}
lib.Atg_AutocastToFullPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _AutocastToReducedPrecision(cudaEnabled bool, cpuEnabled bool, cudaDtype gotch.DType, cpuDtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccudaEnabled := int32(0)
if cudaEnabled {
ccudaEnabled = int32(1)
}
ccpuEnabled := int32(0)
if cpuEnabled {
ccpuEnabled = int32(1)
}
lib.Atg_AutocastToReducedPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled, cudaDtype.CInt(), cpuDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
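// gotch.DType and gotch.Device arguments cross the boundary as their integer
// codes via CInt(), as in _AutocastToReducedPrecision above and the factory
// functions further down (_CudnnInitDropoutState, _Efficientzerotensor,
// _EmptyAffineQuantized).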
func (ts *Tensor) _CastByte(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CastChar(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CastDouble(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CastFloat(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CastHalf(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CastInt(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CastLong(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CastShort(nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
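// Package-level functions taking a []Tensor (such as _Cat below) first copy
// the C handles into a []lib.Ctensor and pass the slice with its length,
// mirroring the (pointer, count) convention of the C API. A hedged
// within-package sketch, assuming tensors a and b with matching trailing
// dimensions:
//
//	cat, err := _Cat([]Tensor{*a, *b}, 0) // concatenate along dim 0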
func _Cat(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.Atg_Cat(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
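// *Out variants (such as _CatOut below) additionally take a preallocated
// destination tensor: the C side writes into out, mirroring ATen's out=
// overloads, and the wrapper still returns a handle to the result.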
func _CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.Atg_CatOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CholeskySolveHelper(a *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Coalesce(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Coalesce(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Coalesced_(coalesced bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccoalesced := int32(0)
if coalesced {
ccoalesced = int32(1)
}
lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func _ComputeLinearCombination(input *Tensor, coefficients *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ComputeLinearCombination(ptr, input.ctensor, coefficients.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ComputeLinearCombinationOut(ptr, out.ctensor, input.ctensor, coefficients.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Conj(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Conj(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ConjPhysical(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConjPhysical(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConvDepthwise2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConvDepthwise2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
lib.Atg_ConvertIndicesFromCooToCsr(ptr, ts.ctensor, size, coutInt32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
lib.Atg_ConvertIndicesFromCooToCsrOut(ptr, out.ctensor, ts.ctensor, size, coutInt32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _ConvertIndicesFromCsrToCoo(crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
ctranspose := int32(0)
if transpose {
ctranspose = int32(1)
}
lib.Atg_ConvertIndicesFromCsrToCoo(ptr, crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _ConvertIndicesFromCsrToCooOut(out *Tensor, crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
ctranspose := int32(0)
if transpose {
ctranspose = int32(1)
}
lib.Atg_ConvertIndicesFromCsrToCooOut(ptr, out.ctensor, crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctransposed := int32(0)
if transposed {
ctransposed = int32(1)
}
cbenchmark := int32(0)
if benchmark {
cbenchmark = int32(1)
}
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
ccudnnEnabled := int32(0)
if cudnnEnabled {
ccudnnEnabled = int32(1)
}
callowTf32 := int32(0)
if allowTf32 {
callowTf32 = int32(1)
}
lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctransposed := int32(0)
if transposed {
ctransposed = int32(1)
}
cbenchmark := int32(0)
if benchmark {
cbenchmark = int32(1)
}
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
ccudnnEnabled := int32(0)
if cudnnEnabled {
ccudnnEnabled = int32(1)
}
lib.Atg_ConvolutionDeprecated(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ConvolutionMode(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CopyFrom(dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_CopyFrom(ptr, ts.ctensor, dst.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _CopyFromAndResize(dst *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_CopyFromAndResize(ptr, ts.ctensor, dst.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
czeroInfinity := int32(0)
if zeroInfinity {
czeroInfinity = int32(1)
}
lib.Atg_CtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, czeroInfinity)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func _CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
czeroInfinity := int32(0)
if zeroInfinity {
czeroInfinity = int32(1)
}
lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
czeroInfinity := int32(0)
if zeroInfinity {
czeroInfinity = int32(1)
}
lib.Atg_CudnnCtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, cdeterministic, czeroInfinity)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _CudnnRnn(input *Tensor, weight []Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
var cweight []lib.Ctensor
for _, t := range weight {
cweight = append(cweight, t.ctensor)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
lib.Atg_CudnnRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, weightBuf.ctensor, hx.ctensor, cx.ctensor, mode, hiddenSize, projSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, len(batchSizes), dropoutState.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
retVal4 = newTensor(*ctensorPtr4)
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
func _CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cweightArr []lib.Ctensor
for _, t := range weightArr {
cweightArr = append(cweightArr, t.ctensor)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, cbatchFirst, cbidirectional)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
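// Not every binding yields a tensor: wrappers such as the two
// _CufftGetPlanCache* functions below return plain Go values straight from
// the shim. No C slot is allocated, but TorchErr() is still consulted so a
// C++-side failure is reported as a Go error.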
func _CufftGetPlanCacheMaxSize(deviceIndex int64) (retVal int64, err error) {
retVal = lib.Atg_CufftGetPlanCacheMaxSize(deviceIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func _CufftGetPlanCacheSize(deviceIndex int64) (retVal int64, err error) {
retVal = lib.Atg_CufftGetPlanCacheSize(deviceIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) _DebugHasInternalOverlap(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_DebugHasInternalOverlap(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) _DetLuBasedHelper(del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_DetLuBasedHelper(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) _DetLuBasedHelperBackwardHelper(detGrad *Tensor, det *Tensor, lu *Tensor, pivs *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_DetLuBasedHelperBackwardHelper(ptr, detGrad.ctensor, det.ctensor, ts.ctensor, lu.ctensor, pivs.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _DimArange(like *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_DimArange(ptr, like.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Dimi(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_Dimi(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) _Dimv(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_Dimv(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func _DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _Efficientzerotensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Efficientzerotensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
csparse := int32(0)
if sparse {
csparse = int32(1)
}
cincludeLastOffset := int32(0)
if includeLastOffset {
cincludeLastOffset = int32(1)
}
lib.Atg_EmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
func _EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
csparse := int32(0)
if sparse {
csparse = int32(1)
}
lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
csparse := int32(0)
if sparse {
csparse = int32(1)
}
cincludeLastOffset := int32(0)
if includeLastOffset {
cincludeLastOffset = int32(1)
}
lib.Atg_EmbeddingBagForwardOnly(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
func _EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EmptyAffineQuantized(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, len(size), scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _EuclideanDist(x1 *Tensor, x2 *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_EuclideanDist(ptr, x1.ctensor, x2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FakeQuantizeLearnablePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FakeQuantizeLearnablePerChannelAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) _FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FakeQuantizeLearnablePerTensorAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FakeQuantizeLearnablePerTensorAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) _FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparams(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, fakeQuantEnabled.ctensor, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
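// The _Fft* helpers below are the low-level kernels behind the public FFT
// API: dim selects the transform axes and forward the direction, while
// normalization carries ATen's internal fft_norm_mode code (believed to be
// 0 = none, 1 = by 1/sqrt(n), 2 = by 1/n at the time this file was
// generated; treat the exact values as an assumption, not a contract).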
func (ts *Tensor) _FftC2c(dim []int64, normalization int64, forward bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cforward := int32(0)
if forward {
cforward = int32(1)
}
lib.Atg_FftC2c(ptr, ts.ctensor, dim, len(dim), normalization, cforward)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cforward := int32(0)
if forward {
cforward = int32(1)
}
lib.Atg_FftC2cOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, cforward)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FftC2r(ptr, ts.ctensor, dim, len(dim), normalization, lastDimSize)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FftC2rOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, lastDimSize)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FftR2c(dim []int64, normalization int64, onesided bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
conesided := int32(0)
if onesided {
conesided = int32(1)
}
lib.Atg_FftR2c(ptr, ts.ctensor, dim, len(dim), normalization, conesided)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
conesided := int32(0)
if onesided {
conesided = int32(1)
}
lib.Atg_FftR2cOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), normalization, conesided)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _FusedDropout(p float64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_FusedDropout(ctensorPtr0, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cperRowFakeQuant := int32(0)
if perRowFakeQuant {
cperRowFakeQuant = int32(1)
}
csymmetricQuant := int32(0)
if symmetricQuant {
csymmetricQuant = int32(1)
}
lib.Atg_FusedMovingAvgObsFqHelper(ctensorPtr0, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _FwPrimal(level int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_FwPrimal(ptr, ts.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.Atg_GridSampler2dCpuFallback(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.Atg_GridSampler2dCpuFallbackBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _HasCompatibleShallowCopyType(from *Tensor, del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_HasCompatibleShallowCopyType(ts.ctensor, from.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) _HasSameStorageNumel(other *Tensor, del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_HasSameStorageNumel(ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) _HistogramddFromBinTensors(bins []Tensor, weight *Tensor, density bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cbins []lib.Ctensor
for _, t := range bins {
cbins = append(cbins, t.ctensor)
}
cdensity := int32(0)
if density {
cdensity = int32(1)
}
lib.Atg_HistogramddFromBinTensors(ptr, ts.ctensor, cbins, len(cbins), weight.ctensor, cdensity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Indices(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Indices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _IsZerotensor(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_IsZerotensor(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) _LinalgInvOutHelper_(infosLu *Tensor, infosGetri *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_LinalgInvOutHelper_(ptr, ts.ctensor, infosLu.ctensor, infosGetri.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) _LinalgQrHelper(mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_LinalgQrHelper(ctensorPtr0, ts.ctensor, mode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func _LinalgSvd(a *Tensor, fullMatrices bool, computeUv bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cfullMatrices := int32(0)
if fullMatrices {
cfullMatrices = int32(1)
}
ccomputeUv := int32(0)
if computeUv {
ccomputeUv = int32(1)
}
lib.Atg_LinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices, ccomputeUv)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func _LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, computeUv bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cfullMatrices := int32(0)
if fullMatrices {
cfullMatrices = int32(1)
}
ccomputeUv := int32(0)
if computeUv {
ccomputeUv = int32(1)
}
lib.Atg_LinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices, ccomputeUv)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
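// A hedged within-package sketch for _LinalgSvd above, assuming a 2-D
// matrix tensor a:
//
//	u, s, vh, err := _LinalgSvd(a, false, true) // thin SVD with U and Vh
//	if err != nil {
//		// handle err
//	}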
func (ts *Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat {
chalfToFloat = int32(1)
}
lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_LogSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat {
chalfToFloat = int32(1)
}
lib.Atg_LogSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Logcumsumexp(dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Logcumsumexp(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _LogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_LogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _LuWithInfo(pivot bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot {
cpivot = int32(1)
}
ccheckErrors := int32(0)
if checkErrors {
ccheckErrors = int32(1)
}
lib.Atg_LuWithInfo(ctensorPtr0, ts.ctensor, cpivot, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func _MakeDual(primal *Tensor, tangent *Tensor, level int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakeDual(ptr, primal.ctensor, tangent.ctensor, level)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _MaskedScale(mask *Tensor, scale float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _MaskedSoftmax(mask *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MaskedSoftmax(ptr, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _MkldnnReshape(shape []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, len(shape))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) _NegView(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NegView(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _NewZerosWithSameFeatureMeta(other *Tensor, selfNumBatchDims int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NewZerosWithSameFeatureMeta(ptr, ts.ctensor, other.ctensor, selfNumBatchDims)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _NnpackAvailable() (retVal bool, err error) {
retVal = lib.Atg_NnpackAvailable()
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func _NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Nnz(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_Nnz(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
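// _PackPaddedSequence is the kernel behind PyTorch's
// torch.nn.utils.rnn.pack_padded_sequence: it returns the packed data tensor
// plus the per-timestep batch sizes. A minimal sketch (hedged: `input` is an
// illustrative [maxLen, batch, features] tensor and `lengths` a 1-D int64
// tensor of sequence lengths, both created by the caller):
//
//	data, batchSizes, err := _PackPaddedSequence(input, lengths, false)
//	if err != nil { /* handle error */ }
//	defer data.MustDrop()
//	defer batchSizes.MustDrop()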
func _PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.Atg_PackPaddedSequence(ctensorPtr0, input.ctensor, lengths.ctensor, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func _PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, len(inputSize), batchSizes.ctensor, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.Atg_PadPackedSequence(ctensorPtr0, data.ctensor, batchSizes.ctensor, cbatchFirst, paddingValue.cscalar, totalLength)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_PdistBackward(ptr, grad.ctensor, ts.ctensor, p, pdist.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _PinMemory(device gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_PinMemory(ptr, ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_RemoveBatchDim(ptr, ts.ctensor, level, batchSize, outDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ReshapeAlias(size []int64, stride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ReshapeAlias(ptr, ts.ctensor, size, len(size), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ReshapeFromTensor(shape *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_RowwisePrune(ctensorPtr0, weight.ctensor, mask.ctensor, compressedIndicesDtype.CInt())
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _SWhere(condition *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SWhere(ptr, condition.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SampleDirichlet(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SampleDirichlet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SaturateWeightToFp16(weight *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SaturateWeightToFp16(ptr, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, axis int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SegmentReduceBackward(ptr, grad.ctensor, output.ctensor, data.ctensor, reduce, lengths.ctensor, axis)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _ShapeAsTensor(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_ShapeAsTensor(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SlowConv2dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_SlowConv2dBackward(ctensorPtr0, gradInput.ctensor, gradWeight.ctensor, gradBias.ctensor, gradOutput.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding))
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func _SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_SobolEngineDraw(ctensorPtr0, quasi.ctensor, n, sobolstate.ctensor, dimension, numGenerated, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) _SobolEngineInitializeState_(dimension int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) _SobolEngineScramble_(ltm *Tensor, dimension int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
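// _Softmax computes softmax over dimension `dim`; `halfToFloat` mirrors the
// ATen flag for computing in float32 on half-precision inputs. Passing
// del=true drops the receiver via the deferred ts.MustDrop(), which is how
// generated call chains avoid leaking intermediate C memory. Sketch
// (`logits` is illustrative; -1 means the last dimension, as in ATen):
//
//	probs, err := logits._Softmax(-1, false, false)
//	if err != nil { /* handle error */ }
//	defer probs.MustDrop()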
func (ts *Tensor) _Softmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat {
chalfToFloat = int32(1)
}
lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SoftmaxBackwardDataOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat {
chalfToFloat = int32(1)
}
lib.Atg_SoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SolveHelper(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_SolveHelper(ctensorPtr0, ts.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _SparseAddmm(sparse *Tensor, dense *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseAddmm(ptr, ts.ctensor, sparse.ctensor, dense.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseBroadcastTo(size []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseBroadcastTo(ptr, ts.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
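// _SparseCooTensorUnsafe builds a sparse COO tensor from `indices` (an int64
// tensor of shape [sparseDim, nnz]) and matching `values` without validating
// them against `size`, hence the "unsafe" suffix. A sketch under assumed
// shapes (the gotch.Float dtype and gotch.CPU device values come from the
// gotch package):
//
//	st, err := _SparseCooTensorUnsafe(indices, values, []int64{3, 3},
//		gotch.Float, gotch.CPU)
//	if err != nil { /* handle error */ }
//	defer st.MustDrop()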
func _SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, len(size), indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseCsrTensorUnsafe(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseLogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat {
chalfToFloat = int32(1)
}
lib.Atg_SparseLogSoftmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseLogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseLogSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SparseMaskHelper(t *Tensor, maskIndices *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseMaskHelper(ptr, t.ctensor, maskIndices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chalfToFloat := int32(0)
if halfToFloat {
chalfToFloat = int32(1)
}
lib.Atg_SparseSoftmax(ptr, ts.ctensor, dim, chalfToFloat)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSparseMatmul(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSparseMatmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSum(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSum(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSumBackward(grad *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSumBackward(ptr, grad.ctensor, ts.ctensor, dim, len(dim))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSumDim(dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSumDim(ptr, ts.ctensor, dim, len(dim))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSumDimDtype(ptr, ts.ctensor, dim, len(dim), dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SparseSumDtype(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_SparseSumDtype(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
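// _Stack concatenates same-shaped tensors along a new dimension `dim`. Note
// the conversion loop below: the Go []Tensor is flattened into raw Ctensor
// handles before crossing the cgo boundary. Sketch (`a` and `b` are
// illustrative *Tensor values of equal shape):
//
//	stacked, err := _Stack([]Tensor{*a, *b}, 0)
//	if err != nil { /* handle error */ }
//	defer stacked.MustDrop()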
func _Stack(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.Atg_Stack(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _StackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.Atg_StackOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _StandardGamma(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_StandardGamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _StandardGammaGrad(output *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _SymeigHelper(eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ceigenvectors := int32(0)
if eigenvectors {
ceigenvectors = int32(1)
}
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.Atg_SymeigHelper(ctensorPtr0, ts.ctensor, ceigenvectors, cupper)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func _TestAmbiguousDefaults(dummy *Tensor, a int64, b int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TestAmbiguousDefaults(ptr, dummy.ctensor, a, b)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TestAmbiguousDefaultsB(ptr, dummy.ctensor, a, b)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _TestOptionalFilledIntlist(values *Tensor, addends []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TestOptionalFilledIntlist(ptr, values.ctensor, addends, len(addends))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _TestOptionalIntlist(values *Tensor, addends []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TestOptionalIntlist(ptr, values.ctensor, addends, len(addends))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _TestSerializationSubcmul(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TestSerializationSubcmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _TestStringDefault(dummy *Tensor, a string, b string) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TestStringDefault(ptr, dummy.ctensor, a, b)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _TestWarnInAutograd(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TestWarnInAutograd(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
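// _ToCopy copies the receiver to the requested dtype and device; nonBlocking
// maps to ATen's asynchronous-copy flag, which matters mainly for
// pinned-memory CPU/CUDA transfers. Sketch of a plain dtype conversion on
// CPU (`x` is illustrative):
//
//	y, err := x._ToCopy(gotch.Double, gotch.CPU, false, false)
//	if err != nil { /* handle error */ }
//	defer y.MustDrop()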
func (ts *Tensor) _ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.Atg_ToCopy(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _TorchCudaCuLinkerSymbolOp(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_TorchCudaCuLinkerSymbolOp(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, len(expand1), expand2, len(expand2), expand3, len(expand3), sumdim, len(sumdim), unrollDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Unique(sorted bool, returnInverse bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
csorted := int32(0)
if sorted {
csorted = int32(1)
}
creturnInverse := int32(0)
if returnInverse {
creturnInverse = int32(1)
}
lib.Atg_Unique(ctensorPtr0, ts.ctensor, csorted, creturnInverse)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
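// _Unique2 extends _Unique above with an optional counts tensor: it returns
// the unique values plus, when requested, inverse indices and counts. The
// three booleans are lowered to int32 for the C ABI, as everywhere in this
// file. Sketch:
//
//	vals, inverse, counts, err := x._Unique2(true, true, true, false)
//	if err != nil { /* handle error */ }
//	defer vals.MustDrop()
//	defer inverse.MustDrop()
//	defer counts.MustDrop()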
func (ts *Tensor) _Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csorted := int32(0)
if sorted {
csorted = int32(1)
}
creturnInverse := int32(0)
if returnInverse {
creturnInverse = int32(1)
}
creturnCounts := int32(0)
if returnCounts {
creturnCounts = int32(1)
}
lib.Atg_Unique2(ctensorPtr0, ts.ctensor, csorted, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func _UnpackDual(dual *Tensor, level int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_UnpackDual(ctensorPtr0, dual.ctensor, level)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) _UnsafeView(size []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_UnsafeView(ptr, ts.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
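// _UpsampleBicubic2dAa shows how this file encodes ATen's optional doubles:
// scalesH/scalesW are Go slices where an empty (or nil) slice means null
// (the c...Null flag stays 1) and a one-element slice supplies the value.
// Sketch upsampling an illustrative NCHW tensor to 64x64 with the scales
// left unset:
//
//	up, err := img._UpsampleBicubic2dAa([]int64{64, 64}, false, nil, nil, false)
//	if err != nil { /* handle error */ }
//	defer up.MustDrop()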
func (ts *Tensor) _UpsampleBicubic2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAa(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleBicubic2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAaBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleBicubic2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleBicubic2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBicubic2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleBilinear2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAa(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleBilinear2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAaBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleBilinear2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleBilinear2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleBilinear2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleNearestExact1d(outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleNearestExact1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleNearestExact1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleNearestExact1dOut(out *Tensor, outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.Atg_UpsampleNearestExact1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleNearestExact2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleNearestExact2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleNearestExact2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleNearestExact2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleNearestExact3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleNearestExact3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UpsampleNearestExact3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _UpsampleNearestExact3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.Atg_UpsampleNearestExact3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64) (retVal bool, err error) {
retVal = lib.Atg_UseCudnnCtcLoss(logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func _UseCudnnRnnFlattenWeight() (retVal bool, err error) {
retVal = lib.Atg_UseCudnnRnnFlattenWeight()
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) _Values(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_Values(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) _Version(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.Atg_Version(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func _WeightNorm(v *Tensor, g *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func _WeightNormCudaInterface(v *Tensor, g *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormCudaInterface(ctensorPtr0, v.ctensor, g.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func _WeightNormCudaInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormCudaInterfaceBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func _WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.Atg_WeightNormDifferentiableBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
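// Abs and its two siblings show the three shapes the generator emits per op:
// Abs allocates a new result, Abs_ mutates in place by rebinding ts.ctensor,
// and AbsOut writes into a caller-supplied tensor. Sketch (`x` is an
// illustrative numeric tensor):
//
//	y, err := x.Abs(false) // new tensor; x is kept
//	if err != nil { /* handle error */ }
//	defer y.MustDrop()
//	if err := y.Abs_(); err != nil { /* handle error */ } // in place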
func (ts *Tensor) Abs(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbs(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Abs_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbs_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AbsOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Absolute(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsolute(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Absolute_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsolute_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AbsoluteOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAbsoluteOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Acos(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcos(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Acos_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcos_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AcosOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Acosh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcosh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Acosh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcosh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AcoshOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAcoshOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
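// AdaptiveAvgPool2d pools the spatial dimensions of an NCHW tensor down to
// outputSize regardless of the input resolution. Sketch of a 1x1 global
// average pool, a common trunk-to-classifier step (`feat` is illustrative):
//
//	pooled, err := feat.AdaptiveAvgPool2d([]int64{1, 1}, false)
//	if err != nil { /* handle error */ }
//	defer pooled.MustDrop()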
func (ts *Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveAvgPool3dBackward(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveMaxPool1d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgAdaptiveMaxPool1d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) AdaptiveMaxPool2d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgAdaptiveMaxPool2d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) AdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgAdaptiveMaxPool2dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) AdaptiveMaxPool3d(outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgAdaptiveMaxPool3d(ctensorPtr0, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) AdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdaptiveMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgAdaptiveMaxPool3dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
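// Add returns the element-wise sum of ts and other, broadcasting shapes
// where necessary.
//
// A minimal usage sketch, assuming a and b are existing tensors of
// compatible shape:
//
//	sum, err := a.Add(b, false) // del=false keeps a alive
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer sum.MustDrop()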
func (ts *Tensor) Add(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdd(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
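// Add_ is the in-place form of Add: it overwrites ts with ts + other and
// reports only an error. In-place wrappers replace ts.ctensor with the
// handle written back by the C call rather than wrapping it in a new
// Tensor.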
func (ts *Tensor) Add_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdd_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AddOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AddScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AddScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
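// Addbmm performs a batch matrix-matrix product of batch1 and batch2, sums
// the products over the batch dimension, and adds the result to ts
// (libtorch's addbmm; the beta and alpha multipliers keep their default
// value of 1 as exposed here).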
func (ts *Tensor) Addbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addbmm_(batch1 *Tensor, batch2 *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addcdiv_(tensor1 *Tensor, tensor2 *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addcmul(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addcmul_(tensor1 *Tensor, tensor2 *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
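// Addmm returns ts + mat1 @ mat2 (libtorch's addmm with its default
// beta = alpha = 1), the fused multiply-add behind dense/linear layers.
//
// A sketch of a linear-layer forward pass, with hypothetical tensors bias
// (broadcastable to n×p), x (n×m) and wT (m×p):
//
//	y, err := bias.Addmm(x, wT, false) // y = bias + x·wT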
func (ts *Tensor) Addmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addmm_(mat1 *Tensor, mat2 *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmm_(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
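// Addmv is the matrix-vector counterpart of Addmm: it returns
// ts + mat @ vec, where mat is a matrix and vec a vector.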
func (ts *Tensor) Addmv(mat *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addmv_(mat *Tensor, vec *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Addr_(vec1 *Tensor, vec2 *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Adjoint(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAdjoint(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
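// AffineGridGenerator builds the sampling grid used by spatial transformer
// networks from a batch of affine matrices theta and the target output
// size; alignCorners selects how grid points map onto pixel centers.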
func AffineGridGenerator(theta *Tensor, size []int64, alignCorners bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, len(size), calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func AffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, len(size), calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Alias(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAlias(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AlignAs(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) All(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAll(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AllAllOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAllAllOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AllDim(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAllDim(ptr, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AllOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
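// Allclose reports whether ts and other are element-wise equal within the
// tolerance |ts - other| <= atol + rtol * |other| (and, when equalNan is
// set, treats NaNs in matching positions as equal). Unlike most wrappers
// it returns a Go bool directly rather than a new tensor.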
func (ts *Tensor) Allclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
cequalNan := int32(0)
if equalNan {
cequalNan = int32(1)
}
retVal = lib.AtgAllclose(ts.ctensor, other.ctensor, rtol, atol, cequalNan)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func AlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AlphaDropout_(p float64, train bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Amax(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAmax(ptr, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AmaxOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAmaxOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Amin(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAmin(ptr, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AminOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAminOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
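// Aminmax returns the minimum and maximum of ts in one pass. The optional
// reduction dimension follows this file's convention for nullable scalars:
// dim is a slice, an empty slice means "no dim" (reduce over all elements),
// and only the first element is consulted otherwise.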
func (ts *Tensor) Aminmax(dim []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAminmax(ctensorPtr0, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) AminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAminmaxOut(ctensorPtr0, min.ctensor, max.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Angle(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAngle(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AngleOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Any(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAny(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AnyAllOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAnyAllOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AnyDim(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAnyDim(ptr, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AnyOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
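// Arange creates a 1-D tensor with values from 0 up to (but excluding)
// end, with the given element type and device.
//
// A usage sketch, assuming the package's IntScalar helper and the
// gotch.Int64/gotch.CPU option values:
//
//	r, err := Arange(IntScalar(10), gotch.Int64, gotch.CPU) // 0..9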
func Arange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ArangeOut(out *Tensor, end *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArangeOut(ptr, out.ctensor, end.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArangeStart(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ArangeStartOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArangeStartOut(ptr, out.ctensor, start.cscalar, end.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ArangeStartStep(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArangeStartStep(ptr, start.cscalar, end.cscalar, step.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arccos(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccos(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arccos_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccos_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ArccosOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccosOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arccosh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccosh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arccosh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccosh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ArccoshOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArccoshOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arcsin(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arcsin_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsin_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ArcsinOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arcsinh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arcsinh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ArcsinhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArcsinhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arctan(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arctan2(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan2(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arctan2_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan2_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Arctan2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arctan_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctan_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ArctanOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arctanh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Arctanh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ArctanhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArctanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
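// Argmax returns the indices of the maximum values of ts. dim uses the
// optional-scalar convention (an empty slice flattens the input first);
// keepdim retains the reduced dimension with size 1.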
func (ts *Tensor) Argmax(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgArgmax(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgArgmaxOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Argmin(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgArgmin(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ArgminOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgArgminOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Argsort(dim int64, descending bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cdescending := int32(0)
if descending {
cdescending = int32(1)
}
lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Argwhere(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgArgwhere(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
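// AsStrided creates a view of ts with an explicit size, stride and
// (optional, slice-encoded) storage offset. Like its libtorch counterpart
// it can alias memory in ways element-wise ops do not expect, so it is
// best reserved for carefully checked layouts.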
func (ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AsStrided_(size []int64, stride []int64, storageOffset []int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstorageOffsetVal int64 = 0
var cstorageOffsetNull int = 1
if len(storageOffset) > 0 {
cstorageOffsetVal = storageOffset[0]
cstorageOffsetNull = 0
}
lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Asin(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Asin_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsin_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AsinOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Asinh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Asinh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AsinhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAsinhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atan(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atan2(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan2(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atan2_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Atan2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atan_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtan_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AtanOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atanh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atanh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) AtanhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atleast1d(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtleast1d(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atleast2d(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtleast2d(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Atleast3d(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgAtleast3d(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
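// AvgPool1d applies 1-D average pooling over the last dimension of ts.
// ceilMode switches the output-size rounding from floor to ceil, and
// countIncludePad counts zero padding toward each window's average. The
// 2-D and 3-D variants below additionally accept a slice-encoded optional
// divisorOverride that replaces the window size as the averaging divisor.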
func (ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
ccountIncludePad := int32(0)
if countIncludePad {
ccountIncludePad = int32(1)
}
var cdivisorOverrideVal int64 = 0
var cdivisorOverrideNull int = 1
if len(divisorOverride) > 0 {
cdivisorOverrideVal = divisorOverride[0]
cdivisorOverrideNull = 0
}
lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Baddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Baddbmm_(batch1 *Tensor, batch2 *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgBartlettWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
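// BatchNorm applies batch normalization over input using the given weight
// (gamma), bias (beta) and running statistics. When training is true the
// batch statistics are used and the running buffers are updated with the
// given momentum; eps stabilizes the variance denominator, and
// cudnnEnabled gates the cuDNN fast path.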
func BatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
ccudnnEnabled := int32(0)
if cudnnEnabled {
ccudnnEnabled = int32(1)
}
lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor, count.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cinputG := int32(0)
if inputG {
cinputG = int32(1)
}
cweightG := int32(0)
if weightG {
cweightG = int32(1)
}
cbiasG := int32(0)
if biasG {
cbiasG = int32(1)
}
lib.AtgBatchNormBackwardReduce(ctensorPtr0, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, cinputG, cweightG, cbiasG)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
func BatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormGatherStats(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, count)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func BatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormGatherStatsWithCounts(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, counts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func BatchNormStats(input *Tensor, eps float64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormStats(ctensorPtr0, input.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func BatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgBatchNormUpdateStats(ctensorPtr0, input.ctensor, runningMean.ctensor, runningVar.ctensor, momentum)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
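// Bernoulli draws a tensor of 0s and 1s, treating each element of ts as
// the probability of drawing a 1. The variants below differ in where the
// probabilities come from: Bernoulli_ takes them from a tensor p,
// BernoulliFloat_ and BernoulliP from a single float64.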
func (ts *Tensor) Bernoulli(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulli(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Bernoulli_(p *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BernoulliFloat_(p float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulliFloat_(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BernoulliOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulliOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BernoulliP(p float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBernoulliP(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Bilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
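// BinaryCrossEntropy computes the binary cross-entropy loss between ts
// (probabilities in [0, 1]) and target, optionally weighted per element;
// reduction follows libtorch's encoding (0 = none, 1 = mean, 2 = sum).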
func (ts *Tensor) BinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinaryCrossEntropyWithLogitsBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Bincount(weights *Tensor, minlength int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Binomial(count *Tensor, prob *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBinomial(ptr, count.ctensor, prob.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
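// BitwiseAnd computes the bitwise AND of ts with a scalar; the Tensor,
// in-place (trailing underscore) and Out-suffixed variants below follow
// the same naming scheme used across this file for every bitwise op.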
func (ts *Tensor) BitwiseAnd(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseAnd_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseAndScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseAndTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseAndTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseAndTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseAndTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseLeftShift(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShift(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseLeftShift_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShift_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func BitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseLeftShiftTensorScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseLeftShiftTensorScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseLeftShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseNot(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseNot(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseNot_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseNot_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseNotOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseOr(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseOr_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseOrScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseOrTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseOrTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseOrTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseOrTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseRightShift(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShift(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseRightShift_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShift_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func BitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseRightShiftTensorScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseRightShiftTensorScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseRightShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseXor(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseXor_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseXorScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseXorTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BitwiseXorTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) BitwiseXorTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBitwiseXorTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
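// BlackmanWindow creates a 1-D Blackman window of windowLength points;
// optionsKind and optionsDevice select the result's dtype and device, as in
// ATen's factory functions. A minimal sketch, assuming the gotch.Float and
// gotch.CPU values from the gotch package (call it as ts.BlackmanWindow
// from outside this package):
//
//	w, err := BlackmanWindow(128, gotch.Float, gotch.CPU)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer w.MustDrop()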
func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgBlackmanWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BlockDiag(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgBlockDiag(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
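// Bmm performs a batched matrix multiply: for ts of shape (b, n, m) and
// mat2 of shape (b, m, p) the result has shape (b, n, p). A minimal sketch,
// assuming this file's Rand factory with the usual (size, kind, device)
// signature; error handling elided:
//
//	a, _ := Rand([]int64{4, 2, 3}, gotch.Float, gotch.CPU)
//	b, _ := Rand([]int64{4, 3, 5}, gotch.Float, gotch.CPU)
//	c, err := a.Bmm(b, true) // del=true: a is dropped after the call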
func (ts *Tensor) Bmm(mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BmmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BroadcastTo(size []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgBroadcastTo(ptr, ts.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
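// Bucketize returns, for each element of ts, the index of the bucket it
// falls into among the sorted 1-D boundaries tensor. outInt32 selects int32
// rather than int64 indices, and right flips which side of an exact
// boundary match an element lands on. The coutInt32/cright locals below
// show the file-wide idiom of lowering Go bools to the int32 flags the C
// ABI expects.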
func (ts *Tensor) Bucketize(boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
cright := int32(0)
if right {
cright = int32(1)
}
lib.AtgBucketize(ptr, ts.ctensor, boundaries.ctensor, coutInt32, cright)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func BucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
cright := int32(0)
if right {
cright = int32(1)
}
lib.AtgBucketizeScalar(ptr, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) BucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
cright := int32(0)
if right {
cright = int32(1)
}
lib.AtgBucketizeTensorOut(ptr, out.ctensor, ts.ctensor, boundaries.ctensor, coutInt32, cright)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CanCast(from gotch.DType, to gotch.DType) (retVal bool, err error) {
retVal = lib.AtgCanCast(from.CInt(), to.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func CartesianProd(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgCartesianProd(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
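// Cat concatenates tensors along dimension dim; all inputs must agree on
// the remaining dimensions. Note the []Tensor value slice: the loop below
// collects each element's ctensor handle for the C call. A minimal sketch,
// assuming two Tensor values of shape (2, 3) reachable through t1 and t2:
//
//	out, err := Cat([]Tensor{*t1, *t2}, 0) // shape (4, 3)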
func Cat(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgCat(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cauchy_(median float64, sigma float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCauchy_(ptr, ts.ctensor, median, sigma)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
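// Cdist computes the batched p-norm distance between the row vectors of x1
// and x2. computeMode is an optional int64 passed in this file's usual
// encoding for nullable scalars: an empty slice means "null, use the
// default", while a one-element slice supplies the value (see the
// ccomputeModeVal/ccomputeModeNull pair below).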
func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ccomputeModeVal int64 = 0
var ccomputeModeNull int = 1
if len(computeMode) > 0 {
ccomputeModeVal = computeMode[0]
ccomputeModeNull = 0
}
lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, ccomputeModeVal, ccomputeModeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ceil(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCeil(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ceil_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCeil_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) CeilOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Celu(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Celu_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func ChainMatmul(matrices []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cmatrices []lib.Ctensor
for _, t := range matrices {
cmatrices = append(cmatrices, t.ctensor)
}
lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ChainMatmulOut(out *Tensor, matrices []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cmatrices []lib.Ctensor
for _, t := range matrices {
cmatrices = append(cmatrices, t.ctensor)
}
lib.AtgChainMatmulOut(ptr, out.ctensor, cmatrices, len(cmatrices))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ChannelShuffle(groups int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgChannelShuffle(ptr, ts.ctensor, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
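// Cholesky factorizes a symmetric positive-definite matrix (or a batch of
// them), returning the lower-triangular factor L with ts = L * L^T, or the
// upper-triangular factor when upper is true.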
func (ts *Tensor) Cholesky(upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgCholesky(ptr, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CholeskyInverse(upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CholeskyInverseOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CholeskySolve(input2 *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
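// ChooseQparamsOptimized is one of this file's multi-result wrappers, which
// all share the pattern visible below: the C call fills consecutive
// pointer-sized slots starting at ctensorPtr0 with the returned Ctensor
// handles, and ctensorPtr1 (plus ctensorPtr2/ctensorPtr3 where present) is
// derived by pointer arithmetic so each extra handle can be read back.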
func ChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgChooseQparamsOptimized(ctensorPtr0, input.ctensor, numel, nBins, ratio, bitWidth)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
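// Clamp limits every element of ts to the closed range [min, max]. A
// minimal sketch, assuming a Scalar constructor such as FloatScalar is
// available in this package (the helper name is hypothetical; any
// *Scalar-producing call works):
//
//	lo, hi := FloatScalar(0.0), FloatScalar(1.0) // hypothetical helper
//	y, err := x.Clamp(lo, hi, false)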
func (ts *Tensor) Clamp(min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Clamp_(min *Scalar, max *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClampMax(max *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMax(ptr, ts.ctensor, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampMax_(max *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClampMaxOut(out *Tensor, max *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampMaxTensor(max *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxTensor(ptr, ts.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampMaxTensor_(max *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxTensor_(ptr, ts.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClampMaxTensorOut(out *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMaxTensorOut(ptr, out.ctensor, ts.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampMin(min *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMin(ptr, ts.ctensor, min.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampMin_(min *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClampMinOut(out *Tensor, min *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampMinTensor(min *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinTensor(ptr, ts.ctensor, min.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampMinTensor_(min *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinTensor_(ptr, ts.ctensor, min.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClampMinTensorOut(out *Tensor, min *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampMinTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampTensor(min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampTensor(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClampTensor_(min *Tensor, max *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClampTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Clip(min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClip(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Clip_(min *Scalar, max *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClip_(ptr, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClipOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClipTensor(min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipTensor(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ClipTensor_(min *Tensor, max *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgClipTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Coalesce(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCoalesce(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Col2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCol2im(ptr, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Col2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCol2imBackward(ptr, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Col2imBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCol2imBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Col2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ColIndices(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgColIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ColumnStack(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgColumnStack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ColumnStackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgColumnStackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Combinations(r int64, withReplacement bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cwithReplacement := int32(0)
if withReplacement {
cwithReplacement = int32(1)
}
lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Complex(real *Tensor, imag *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgComplex(ptr, real.ctensor, imag.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ComplexOut(out *Tensor, real *Tensor, imag *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgComplexOut(ptr, out.ctensor, real.ctensor, imag.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Concat(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgConcat(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ConcatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgConcatOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Conj(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConj(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ConjPhysical(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConjPhysical(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ConjPhysical_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConjPhysical_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ConjPhysicalOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConjPhysicalOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ConstantPadNd(pad []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConstantPadNd(ptr, ts.ctensor, pad, len(pad))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Contiguous(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgContiguous(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Conv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Conv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConv1dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
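// Conv2d applies a 2-D convolution: input is NCHW and weight is
// (outC, inC/groups, kH, kW). A minimal sketch with stride 1, padding 1,
// dilation 1, and a single group, assuming input, weight, and bias tensors
// already exist:
//
//	out, err := Conv2d(input, weight, bias,
//		[]int64{1, 1}, []int64{1, 1}, []int64{1, 1}, 1)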
func Conv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Conv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConv2dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Conv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Conv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConv3dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConvDepthwise3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgConvTbcBackward(ctensorPtr0, ts.ctensor, input.ctensor, weight.ctensor, bias.ctensor, pad)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func ConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
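// Convolution is the generalized entry point behind the ConvNd wrappers:
// with transposed false it matches the corresponding Conv1d/2d/3d call,
// while with transposed true it performs the transposed ("deconvolution")
// variant, where outputPadding disambiguates the output shape.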
func Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctransposed := int32(0)
if transposed {
ctransposed = int32(1)
}
lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctransposed := int32(0)
if transposed {
ctransposed = int32(1)
}
lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CopySparseToSparse_(src *Tensor, nonBlocking bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Copysign(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysign(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Copysign_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysign_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) CopysignOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CopysignScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CopysignScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) CopysignScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCopysignScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Corrcoef(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCorrcoef(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cos(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCos(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cos_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCos_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) CosOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cosh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cosh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) CoshOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CountNonzero(dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgCountNonzero(ptr, ts.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CountNonzeroDimIntlist(dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCountNonzeroDimIntlist(ptr, ts.ctensor, dim, len(dim))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cov(correction int64, fweights *Tensor, aweights *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCov(ptr, ts.ctensor, correction, fweights.ctensor, aweights.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cross(other *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgCross(ptr, ts.ctensor, other.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCrossEntropyLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, labelSmoothing)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CrowIndices(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCrowIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
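// CtcLoss computes the connectionist temporal classification loss.
// logProbs is the (T, N, C) log-softmax output, targets holds the
// concatenated label sequences, and inputLengths/targetLengths give the
// per-sample lengths. blank is the blank-label index, reduction selects
// none/mean/sum (encoded 0/1/2 in ATen), and zeroInfinity zeroes out
// infinite losses instead of propagating them.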
func CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
czeroInfinity := int32(0)
if zeroInfinity {
czeroInfinity = int32(1)
}
lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, reduction, czeroInfinity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
czeroInfinity := int32(0)
if zeroInfinity {
czeroInfinity = int32(1)
}
lib.AtgCtcLossTensor(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgCudnnBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
func CudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCudnnBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon, reserveSpace.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) CudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cbenchmark := int32(0)
if benchmark {
cbenchmark = int32(1)
}
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
callowTf32 := int32(0)
if allowTf32 {
callowTf32 = int32(1)
}
lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnConvolutionAddRelu(ptr, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnConvolutionRelu(ptr, ts.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cbenchmark := int32(0)
if benchmark {
cbenchmark = int32(1)
}
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
callowTf32 := int32(0)
if allowTf32 {
callowTf32 = int32(1)
}
lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CudnnGridSampler(grid *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCudnnGridSamplerBackward(ctensorPtr0, ts.ctensor, grid.ctensor, gradOutput.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) CudnnIsAcceptable(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgCudnnIsAcceptable(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
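// Cummax returns the running maximum of ts along dim together with the
// indices at which each maximum occurs, both shaped like ts. A minimal
// sketch:
//
//	values, indices, err := x.Cummax(0, false)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer values.MustDrop()
//	defer indices.MustDrop()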
func (ts *Tensor) Cummax(dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCummax(ctensorPtr0, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) CummaxOut(values *Tensor, indices *Tensor, dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCummaxOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func CummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCummaxminBackward(ptr, grad.ctensor, input.ctensor, indices.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cummin(dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCummin(ctensorPtr0, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) CumminOut(values *Tensor, indices *Tensor, dim int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgCumminOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cumprod_(dim int64, dtype gotch.DType) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumprod_(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func CumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumprodBackward(ptr, grad.ctensor, input.ctensor, dim, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) CumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumprodOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cumsum(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Cumsum_(dim int64, dtype gotch.DType) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumsum_(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) CumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
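// CumulativeTrapezoid computes the cumulative trapezoidal-rule integral of y
// along dim with unit spacing between samples; CumulativeTrapezoidX below
// takes explicit sample coordinates x instead. Sketch, assuming y is an
// existing 1-D *Tensor:
//
//	integral, err := CumulativeTrapezoid(y, 0)
//	if err != nil {
//		return err
//	}
//	defer integral.MustDrop()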
func CumulativeTrapezoid(y *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumulativeTrapezoid(ptr, y.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func CumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgCumulativeTrapezoidX(ptr, y.ctensor, x.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Data(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgData(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Deg2rad(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDeg2rad(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Deg2rad_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDeg2rad_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Deg2radOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDeg2radOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DenseDim(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgDenseDim(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) Dequantize(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDequantize(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Det(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Detach(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDetach(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Detach_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDetach_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
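// Diag extracts the diagonal of a 2-D input, or builds a diagonal matrix
// from a 1-D input; diagonal selects the offset (0 = main, positive = above,
// negative = below). Sketch, assuming m is an existing 2-D *Tensor:
//
//	d, err := m.Diag(0, false)
//	if err != nil {
//		return err
//	}
//	defer d.MustDrop()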
func (ts *Tensor) Diag(diagonal int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiag(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func DiagBackward(grad *Tensor, inputSizes []int64, diagonal int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiagBackward(ptr, grad.ctensor, inputSizes, len(inputSizes), diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DiagOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Diagflat(offset int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiagflat(ptr, ts.ctensor, offset)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func DiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiagonalBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DiagonalScatter(src *Tensor, offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiagonalScatter(ptr, ts.ctensor, src.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
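// Diff computes the n-th forward difference along dim. prepend and append
// are concatenated to the input along dim before differencing; the generated
// binding dereferences both unconditionally, so real tensors must be passed.
// Sketch, where x, prependTs, and appendTs are assumed existing *Tensor values:
//
//	d, err := x.Diff(1, 0, prependTs, appendTs, false)
//	if err != nil {
//		return err
//	}
//	defer d.MustDrop()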
func (ts *Tensor) Diff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiff(ptr, ts.ctensor, n, dim, prepend.ctensor, append.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiffOut(ptr, out.ctensor, ts.ctensor, n, dim, prepend.ctensor, append.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Digamma(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDigamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Digamma_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDigamma_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) DigammaOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Dist(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDist(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Div(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiv(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Div_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDiv_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) DivOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
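// DivScalarMode divides by a scalar and rounds according to roundingMode;
// libtorch accepts "trunc" (round toward zero) and "floor" (round toward
// negative infinity) here. Sketch, assuming x is a *Tensor and s a *Scalar:
//
//	q, err := x.DivScalarMode(s, "floor", false)
//	if err != nil {
//		return err
//	}
//	defer q.MustDrop()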
func (ts *Tensor) DivScalarMode(other *Scalar, roundingMode string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivScalarMode_(other *Scalar, roundingMode string) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) DivTensorMode(other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivTensorMode_(other *Tensor, roundingMode string) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Divide(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivide(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Divide_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivide_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) DivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivideScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivideScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) DivideScalarMode(other *Scalar, roundingMode string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivideScalarMode_(other *Scalar, roundingMode string) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) DivideTensorMode(other *Tensor, roundingMode string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DivideTensorMode_(other *Tensor, roundingMode string) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDivideTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Dot(tensor *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDot(ptr, ts.ctensor, tensor.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) DotOut(out *Tensor, tensor *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
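// Dropout zeroes elements of input with probability p when train is true and
// is the identity when train is false. Sketch, assuming x is an existing
// *Tensor:
//
//	out, err := Dropout(x, 0.5, true)
//	if err != nil {
//		return err
//	}
//	defer out.MustDrop()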
func Dropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
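	// Go bools cross the cgo boundary as int32 flags: 0 for false, 1 for true.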
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Dropout_(p float64, train bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
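// Dstack stacks tensors depthwise (along the third axis), promoting 1-D and
// 2-D inputs to 3-D first. Note the parameter is a slice of Tensor values,
// not pointers. Sketch, assuming a and b are same-shaped *Tensor values:
//
//	stacked, err := Dstack([]Tensor{*a, *b})
//	if err != nil {
//		return err
//	}
//	defer stacked.MustDrop()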
func Dstack(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgDstack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func DstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgDstackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Eig(eigenvectors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ceigenvectors := int32(0)
if eigenvectors {
ceigenvectors = int32(1)
}
lib.AtgEig(ctensorPtr0, ts.ctensor, ceigenvectors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) EigE(e *Tensor, v *Tensor, eigenvectors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ceigenvectors := int32(0)
if eigenvectors {
ceigenvectors = int32(1)
}
lib.AtgEigE(ctensorPtr0, e.ctensor, v.ctensor, ts.ctensor, ceigenvectors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
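// Einsum evaluates an Einstein-summation expression over the operands, e.g.
// "ij,jk->ik" for a matrix product. Sketch, assuming a and b are 2-D
// *Tensor values with compatible shapes:
//
//	c, err := Einsum("ij,jk->ik", []Tensor{*a, *b})
//	if err != nil {
//		return err
//	}
//	defer c.MustDrop()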
func Einsum(equation string, tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgEinsum(ptr, equation, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Elu(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgElu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Elu_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgElu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func EluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cisResult := int32(0)
if isResult {
cisResult = int32(1)
}
lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cisResult := int32(0)
if isResult {
cisResult = int32(1)
}
lib.AtgEluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) EluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Embedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
csparse := int32(0)
if sparse {
csparse = int32(1)
}
lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
csparse := int32(0)
if sparse {
csparse = int32(1)
}
lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
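// EmbeddingBag gathers embeddings from weight at indices, groups them into
// bags at the given offsets, and reduces each bag. mode follows libtorch's
// integer encoding, which (carrying over PyTorch's embedding_bag convention,
// an assumption not stated in this file) is 0 = sum, 1 = mean, 2 = max. The
// first returned tensor is the reduced output; the other three are auxiliary
// results consumed by the backward pass.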
func EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
csparse := int32(0)
if sparse {
csparse = int32(1)
}
cincludeLastOffset := int32(0)
if includeLastOffset {
cincludeLastOffset = int32(1)
}
lib.AtgEmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
func EmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
csparse := int32(0)
if sparse {
csparse = int32(1)
}
cincludeLastOffset := int32(0)
if includeLastOffset {
cincludeLastOffset = int32(1)
}
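	// Optional int64 arguments are encoded for C as a value plus a null flag:
	// an empty paddingIdx slice sets the null flag, a one-element slice
	// supplies the value.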
var cpaddingIdxVal int64 = 0
var cpaddingIdxNull int = 1
if len(paddingIdx) > 0 {
cpaddingIdxVal = paddingIdx[0]
cpaddingIdxNull = 0
}
lib.AtgEmbeddingBagPaddingIdx(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, cpaddingIdxVal, cpaddingIdxNull)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
func EmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) EmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func EmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cscaleGradByFreq := int32(0)
if scaleGradByFreq {
cscaleGradByFreq = int32(1)
}
lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
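// Empty allocates an uninitialized tensor of the given size with the
// requested element kind and device. Sketch; gotch.Float and gotch.CPU are
// assumed to be the option values used elsewhere in gotch:
//
//	t, err := Empty([]int64{2, 3}, gotch.Float, gotch.CPU)
//	if err != nil {
//		return err
//	}
//	defer t.MustDrop()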
func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmpty(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) EmptyLike(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmptyLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EmptyOut(out *Tensor, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmptyOut(ptr, out.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmptyQuantized(ptr, size, len(size), qtensor.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEmptyStrided(ptr, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Eq(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEq(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Eq_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEq_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) EqScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) EqTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) EqTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) EqTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEqTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Equal(other *Tensor, del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgEqual(ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) Erf(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Erf_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErf_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ErfOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Erfc(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Erfc_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfc_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ErfcOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Erfinv(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfinv(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Erfinv_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfinv_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ErfinvOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Exp(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Exp2(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp2(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Exp2_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp2_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Exp2Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp2Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Exp_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExp_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ExpOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
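// Expand broadcasts the tensor to size without copying data; a -1 entry in
// size keeps the corresponding dimension, and implicit mirrors libtorch's
// flag permitting implicit expansion of leading dimensions. Sketch, assuming
// x has shape [1, 4]:
//
//	y, err := x.Expand([]int64{3, 4}, false, false)
//	if err != nil {
//		return err
//	}
//	defer y.MustDrop()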
func (ts *Tensor) Expand(size []int64, implicit bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cimplicit := int32(0)
if implicit {
cimplicit = int32(1)
}
lib.AtgExpand(ptr, ts.ctensor, size, len(size), cimplicit)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ExpandAs(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Expm1(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpm1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Expm1_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpm1_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Expm1Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExpm1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Exponential_(lambd float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgExponential_(ptr, ts.ctensor, lambd)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
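// Eye returns the n-by-n identity matrix with the requested kind and device;
// EyeM below covers the rectangular n-by-m case. Sketch, with gotch.Float
// and gotch.CPU assumed to be the usual option values:
//
//	id, err := Eye(3, gotch.Float, gotch.CPU)
//	if err != nil {
//		return err
//	}
//	defer id.MustDrop()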
func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEyeM(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EyeMOut(out *Tensor, n int64, m int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEyeMOut(ptr, out.ctensor, n, m)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func EyeOut(out *Tensor, n int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgEyeOut(ptr, out.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFakeQuantizePerChannelAffineCachemask(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func FakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerChannelAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFakeQuantizePerTensorAffineCachemask(ctensorPtr0, ts.ctensor, scale, zeroPoint, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func FakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerTensorAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFakeQuantizePerTensorAffineTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FbgemmPackGemmMatrixFp16(input *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FbgemmPackQuantizedMatrix(input *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFbgemmPackQuantizedMatrixKn(ptr, input.ctensor, k, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FeatureAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FeatureAlphaDropout_(p float64, train bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func FeatureDropout(input *Tensor, p float64, train bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FeatureDropout_(p float64, train bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
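// FftFft computes the 1-D discrete Fourier transform along dim. n is this
// file's encoding of libtorch's optional length argument: an empty (or nil)
// slice means "unset" (use the input size along dim), while a one-element
// slice supplies the transform length. norm is one of "forward", "backward",
// or "ortho". Sketch, assuming x is an existing *Tensor:
//
//	f, err := x.FftFft(nil, 0, "backward", false)
//	if err != nil {
//		return err
//	}
//	defer f.MustDrop()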
func (ts *Tensor) FftFft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
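	// As elsewhere in this file, the optional n is lowered to a
	// value/null-flag pair derived from whether the slice is empty.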
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftFft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftFft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftFftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FftFftfreqOut(out *Tensor, n int64, d float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFftfreqOut(ptr, out.ctensor, n, d)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftFftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftFftshift(dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftFftshift(ptr, ts.ctensor, dim, len(dim))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftHfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftHfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftHfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftHfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftHfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftHfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftHfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftHfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftHfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftHfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftHfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIfftshift(dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIfftshift(ptr, ts.ctensor, dim, len(dim))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIhfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIhfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIhfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIhfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIhfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIhfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIhfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIhfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIhfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIhfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIhfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIrfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIrfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIrfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIrfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIrfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftIrfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIrfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIrfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftIrfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftRfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftRfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftRfft2(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfft2(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfft2Out(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
lib.AtgFftRfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
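// FftRfftfreq wraps torch.fft.rfftfreq: it returns the n/2+1 non-negative
// sample frequencies for a real FFT of length n with sample spacing d.
// A minimal sketch (gotch.Float / gotch.CPU as in the rest of this API):
//
//	freqs, err := FftRfftfreq(5, 0.2, gotch.Float, gotch.CPU)
//	// frequencies are k/(n*d): here [0, 1, 2]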
func FftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FftRfftfreqOut(out *Tensor, n int64, d float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfftfreqOut(ptr, out.ctensor, n, d)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftRfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFftRfftnOut(ptr, out.ctensor, ts.ctensor, s, len(s), dim, len(dim), norm)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Fill_(value *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFill_(ptr, ts.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FillDiagonal_(fillValue *Scalar, wrap bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cwrap := int32(0)
if wrap {
cwrap = int32(1)
}
lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FillTensor_(value *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFillTensor_(ptr, ts.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Fix(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFix(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Fix_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFix_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FixOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFixOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
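// Flatten collapses the dimensions from startDim through endDim
// (inclusive) into one. A minimal sketch, assuming the Ones factory
// generated elsewhere in this file:
//
//	x, _ := Ones([]int64{2, 3, 4}, gotch.Float, gotch.CPU)
//	y, _ := x.Flatten(1, 2, true) // shape [2, 12]; del=true drops x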
func (ts *Tensor) Flatten(startDim int64, endDim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FlattenDenseTensors(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgFlattenDenseTensors(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
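// Flip reverses the order of elements along each of the listed dims;
// Fliplr and Flipud below are the fixed 2-D conveniences (dims [1] and
// [0] respectively). Sketch, with hypothetical values:
//
//	y, err := x.Flip([]int64{0}, false) // reverse rows, keep x alive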
func (ts *Tensor) Flip(dims []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFlip(ptr, ts.ctensor, dims, len(dims))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Fliplr(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFliplr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Flipud(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFlipud(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloatPower(exponent *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPower(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloatPower_(exponent *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPower_(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func FloatPowerScalar(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerScalar(ptr, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloatPowerTensor_(exponent *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensor_(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FloatPowerTensorScalar(exponent *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensorScalar(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloatPowerTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Floor(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloor(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Floor_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloor_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FloorDivide(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloorDivide_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FloorDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloorDivideScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivideScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FloorDivideScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorDivideScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FloorOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
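// Fmax and Fmin mirror C's fmax/fmin NaN handling: where exactly one of
// the two operands is NaN, the other operand is returned, unlike the
// Maximum/Minimum wrappers which propagate NaN.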
func (ts *Tensor) Fmax(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmax(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FmaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Fmin(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmin(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FminOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFminOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Fmod(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmod(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Fmod_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmod_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FmodScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FmodTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FmodTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FmodTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFmodTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Frac(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFrac(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Frac_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFrac_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) FracOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFracOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
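// FractionalMaxPool2d returns a (values, indices) pair. As with every
// multi-result wrapper in this file, the C call writes both handles into
// adjacent Ctensor slots: ctensorPtr1 is simply one slot past
// ctensorPtr0. randomSamples supplies the pseudo-random numbers that
// position the pooling regions.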
func (ts *Tensor) FractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFractionalMaxPool2d(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) FractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFractionalMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFractionalMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFractionalMaxPool2dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) FractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFractionalMaxPool3d(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) FractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFractionalMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFractionalMaxPool3dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), randomSamples.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
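// Frexp decomposes each element into a mantissa/exponent pair so that
// ts = mantissa * 2^exponent, returned as (mantissa, exponent) tensors.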
func (ts *Tensor) Frexp(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFrexp(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) FrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgFrexpTensorOut(ctensorPtr0, mantissa.ctensor, exponent.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) FrobeniusNorm(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFrobeniusNorm(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FrobeniusNormDim(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgFrobeniusNormDim(ptr, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
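// FromFile builds a CPU tensor backed by the named file; with
// shared=true the storage is mapped so changes are visible to (and from)
// other users of the file. The optional element count `size` uses the
// same empty-slice convention as the optional FFT length above.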
func FromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cshared := int32(0)
if shared {
cshared = int32(1)
}
var csizeVal int64 = 0
var csizeNull int = 1
if len(size) > 0 {
csizeVal = size[0]
csizeNull = 0
}
lib.AtgFromFile(ptr, filename, cshared, csizeVal, csizeNull, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
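// Full creates a tensor of the given size with every element set to
// fillValue. A minimal sketch, assuming the FloatScalar helper from this
// package's scalar API:
//
//	fv := FloatScalar(3.14)
//	t, err := Full([]int64{2, 3}, fv, gotch.Float, gotch.CPU)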
func Full(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFull(ptr, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FullLike(fillValue *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func FullOut(out *Tensor, size []int64, fillValue *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgFullOut(ptr, out.ctensor, size, len(size), fillValue.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) FusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperRowFakeQuant := int32(0)
if perRowFakeQuant {
cperRowFakeQuant = int32(1)
}
csymmetricQuant := int32(0)
if symmetricQuant {
csymmetricQuant = int32(1)
}
lib.AtgFusedMovingAvgObsFakeQuant(ptr, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
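// Gather collects values along `dim` using an integer index tensor; the
// sparseGrad flag requests a sparse gradient for the source. A minimal
// sketch, assuming the OfSlice/Must* helpers defined elsewhere in this
// package:
//
//	src := MustOfSlice([]int64{10, 20, 30, 40})  // shape [4]
//	idx := MustOfSlice([]int64{3, 0})            // shape [2]
//	out, err := src.Gather(0, idx, false, false) // -> [40, 10]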
func (ts *Tensor) Gather(dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csparseGrad := int32(0)
if sparseGrad {
csparseGrad = int32(1)
}
lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csparseGrad := int32(0)
if sparseGrad {
csparseGrad = int32(1)
}
lib.AtgGatherBackward(ptr, grad.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csparseGrad := int32(0)
if sparseGrad {
csparseGrad = int32(1)
}
lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Gcd(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGcd(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Gcd_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGcd_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GcdOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGcdOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ge(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGe(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ge_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGe_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GeTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GeTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Gelu(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GeluBackward(grad *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeluBackward(ptr, grad.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GeluBackwardGradInput(gradInput *Tensor, grad *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeluBackwardGradInput(ptr, gradInput.ctensor, grad.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GeluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Geometric_(p float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGeometric_(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Geqrf(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgGeqrf(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) GeqrfA(a *Tensor, tau *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgGeqrfA(ctensorPtr0, a.ctensor, tau.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Ger(vec2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGer(ptr, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GerOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Glu(dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGlu(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GluBackward(gradOutput *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GluOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Grad(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGrad(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Greater(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreater(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Greater_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreater_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GreaterEqual(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqual(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GreaterEqual_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqual_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GreaterEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GreaterEqualTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GreaterEqualTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GreaterEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GreaterScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GreaterTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GreaterTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GreaterTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGreaterTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
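// GridSampler samples `input` at the normalized coordinates ([-1, 1] per
// axis) given in `grid`. The integer modes follow ATen's enums (assumed
// here): interpolationMode 0 = bilinear, 1 = nearest, 2 = bicubic;
// paddingMode 0 = zeros, 1 = border, 2 = reflection.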
func GridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func GridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func GridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func GridSampler3dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
lib.AtgGridSampler3dBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func GroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccudnnEnabled := int32(0)
if cudnnEnabled {
ccudnnEnabled = int32(1)
}
lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
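// Gru runs a multi-layer GRU over `input` with initial hidden state `hx`
// and returns (output, finalHidden). `params` is the flattened weight
// list (wIh, wHh and, when hasBiases is set, bIh, bHh per layer and
// direction), and the bool flags are lowered to int32 for the C ABI as
// everywhere else in this file.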
func Gru(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.AtgGru(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func GruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func GruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
lib.AtgGruData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Gt(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGt(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Gt_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGt_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GtScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GtTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) GtTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) GtTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgGtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
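// HammingWindow and its Periodic/Alpha/Beta variants build the
// generalized Hamming window w[k] = alpha - beta*cos(2*pi*k/(N-1))
// (defaults alpha = 0.54, beta = 0.46); the periodic form computes an
// N+1 point window and drops the final sample, the usual preparation
// for spectral analysis.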
func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func HammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgHammingWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func HammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgHammingWindowPeriodicAlpha(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func HammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgHammingWindowPeriodicAlphaBeta(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func HannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgHannWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hardshrink(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrink(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOut.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardshrinkOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardshrinkOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hardsigmoid(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoid(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hardsigmoid_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoid_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) HardsigmoidBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardsigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hardswish(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswish(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hardswish_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswish_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) HardswishBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswishBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardswishOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardswishOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hardtanh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hardtanh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) HardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HardtanhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Heaviside(values *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHeaviside(ptr, ts.ctensor, values.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Heaviside_(values *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHeaviside_(ptr, ts.ctensor, values.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) HeavisideOut(out *Tensor, values *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHeavisideOut(ptr, out.ctensor, ts.ctensor, values.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Histc(bins int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHistc(ptr, ts.ctensor, bins)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HistcOut(out *Tensor, bins int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Hspmm(mat1 *Tensor, mat2 *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func HspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Hstack(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgHstack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func HstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgHstackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
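// HuberLoss is quadratic for small residuals and linear for large ones:
// 0.5*(x-y)^2 where |x-y| < delta, and delta*(|x-y| - 0.5*delta)
// elsewhere. `reduction` uses the usual ATen encoding (assumed here):
// 0 = none, 1 = mean, 2 = sum.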
func (ts *Tensor) HuberLoss(target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLoss(ptr, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) HuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHuberLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, delta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Hypot(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHypot(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
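// Hypot_ is the in-place variant: the generated `_` methods write the result
// back through ts.ctensor instead of returning a new tensor, matching
// PyTorch's trailing-underscore convention.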
func (ts *Tensor) Hypot_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHypot_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) HypotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgHypotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) I0(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgI0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) I0_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgI0_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) I0Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgI0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Igamma(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgamma(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Igamma_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgamma_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) IgammaOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammaOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Igammac(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammac(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Igammac_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammac_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) IgammacOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIgammacOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Im2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIm2col(ptr, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Im2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIm2colBackward(ptr, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Im2colBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIm2colBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Im2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Imag(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgImag(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IndexAdd(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IndexAdd_(dim int64, index *Tensor, source *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) IndexAddOut(out *Tensor, dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IndexCopy(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IndexCopy_(dim int64, index *Tensor, source *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) IndexFill(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IndexFill_(dim int64, index *Tensor, value *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) IndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFillIntTensor(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IndexFillIntTensor_(dim int64, index *Tensor, value *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexFillIntTensor_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func IndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexSelectBackward(ptr, grad.ctensor, selfSizes, len(selfSizes), dim, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Indices(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIndices(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) InfinitelyDifferentiableGeluBackward(grad *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInfinitelyDifferentiableGeluBackward(ptr, grad.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Inner(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInner(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) InnerOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInnerOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
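// InstanceNorm mirrors torch.instance_norm. Go bools cannot cross cgo
// directly, so each flag is lowered to an int32 (0/1) before the call; the
// same lowering appears in every generated wrapper that takes a bool.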
func InstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cuseInputStats := int32(0)
if useInputStats {
cuseInputStats = int32(1)
}
ccudnnEnabled := int32(0)
if cudnnEnabled {
ccudnnEnabled = int32(1)
}
lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IntRepr(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIntRepr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Inverse(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInverse(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) InverseOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
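// The Is* predicates below return a Go bool straight from the C call, with no
// result-tensor allocation. Note that `del` still drops the receiver, so pass
// false when the tensor is needed after the query.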
func (ts *Tensor) IsCoalesced(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsCoalesced(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsComplex(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsComplex(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsConj(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsConj(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsDistributed(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsDistributed(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsFloatingPoint(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsFloatingPoint(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsInference(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsInference(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsLeaf(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsLeaf(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsNeg(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsNeg(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsNonzero(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsNonzero(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsPinned(device gotch.Device, del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsPinned(ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsSameSize(other *Tensor, del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsSameSize(ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsSetTo(tensor *Tensor, del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsSetTo(ts.ctensor, tensor.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) IsSigned(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgIsSigned(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func IsVulkanAvailable() (retVal bool, err error) {
retVal = lib.AtgIsVulkanAvailable()
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) Isclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cequalNan := int32(0)
if equalNan {
cequalNan = int32(1)
}
lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Isfinite(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsfinite(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Isin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique {
cassumeUnique = int32(1)
}
cinvert := int32(0)
if invert {
cinvert = int32(1)
}
lib.AtgIsin(ptr, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func IsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique {
cassumeUnique = int32(1)
}
cinvert := int32(0)
if invert {
cinvert = int32(1)
}
lib.AtgIsinScalarTensor(ptr, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func IsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique {
cassumeUnique = int32(1)
}
cinvert := int32(0)
if invert {
cinvert = int32(1)
}
lib.AtgIsinScalarTensorOut(ptr, out.ctensor, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func IsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique {
cassumeUnique = int32(1)
}
cinvert := int32(0)
if invert {
cinvert = int32(1)
}
lib.AtgIsinTensorScalar(ptr, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func IsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique {
cassumeUnique = int32(1)
}
cinvert := int32(0)
if invert {
cinvert = int32(1)
}
lib.AtgIsinTensorScalarOut(ptr, out.ctensor, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func IsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cassumeUnique := int32(0)
if assumeUnique {
cassumeUnique = int32(1)
}
cinvert := int32(0)
if invert {
cinvert = int32(1)
}
lib.AtgIsinTensorTensorOut(ptr, out.ctensor, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Isinf(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsinf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Isnan(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsnan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Isneginf(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsneginf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IsneginfOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsneginfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Isposinf(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsposinf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) IsposinfOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsposinfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Isreal(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgIsreal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
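// Istft mirrors torch.istft. Optional scalar arguments use the generated
// nullable-slice convention: a nil or empty slice means "not set", and a
// one-element slice supplies the value. A sketch, with hypothetical inputs:
//
//	// hop length 128, window length unset, output length unset:
//	out, err := x.Istft(nFft, []int64{128}, nil, window, true, false, true, nil, false, false)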
func (ts *Tensor) Istft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var chopLengthVal int64 = 0
var chopLengthNull int = 1
if len(hopLength) > 0 {
chopLengthVal = hopLength[0]
chopLengthNull = 0
}
var cwinLengthVal int64 = 0
var cwinLengthNull int = 1
if len(winLength) > 0 {
cwinLengthVal = winLength[0]
cwinLengthNull = 0
}
ccenter := int32(0)
if center {
ccenter = int32(1)
}
cnormalized := int32(0)
if normalized {
cnormalized = int32(1)
}
conesided := int32(0)
if onesided {
conesided = int32(1)
}
var clengthVal int64 = 0
var clengthNull int = 1
if len(length) > 0 {
clengthVal = length[0]
clengthNull = 0
}
creturnComplex := int32(0)
if returnComplex {
creturnComplex = int32(1)
}
lib.AtgIstft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, cnormalized, conesided, clengthVal, clengthNull, creturnComplex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
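// KaiserWindow and its *Beta/*Periodic variants correspond to the
// default-argument overloads of torch.kaiser_window. A sketch, assuming the
// usual gotch kind/device constants:
//
//	w, err := ts.KaiserWindowBeta(256, true, 12.0, gotch.Float, gotch.CPU)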
func KaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgKaiserWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func KaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgKaiserWindowBeta(ptr, windowLength, cperiodic, beta, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func KaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cperiodic := int32(0)
if periodic {
cperiodic = int32(1)
}
lib.AtgKaiserWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) KlDiv(target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
clogTarget := int32(0)
if logTarget {
clogTarget = int32(1)
}
lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction, clogTarget)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) KlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
clogTarget := int32(0)
if logTarget {
clogTarget = int32(1)
}
lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, clogTarget)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Kron(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgKron(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) KronOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgKronOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
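// Kthvalue returns (values, indices), mirroring torch.kthvalue. Multi-result
// wrappers reserve consecutive handle slots: the C side writes every result
// starting at ctensorPtr0, and ctensorPtr1 is derived by pointer arithmetic,
// one pointer-width past ctensorPtr0. As with the single-result wrappers, the
// zero-byte malloc appears to rely on allocator slack rather than sizing the
// block for the number of results.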
func (ts *Tensor) Kthvalue(k int64, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgKthvalue(ctensorPtr0, ts.ctensor, k, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) KthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgKthvalueValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) L1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) L1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) L1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) L1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ccudnnEnable := int32(0)
if cudnnEnable {
ccudnnEnable = int32(1)
}
lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps, ccudnnEnable)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Lcm(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLcm(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Lcm_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLcm_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LcmOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLcmOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ldexp(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLdexp(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ldexp_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLdexp_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LdexpOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLdexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Le(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLe(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Le_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLe_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LeTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LeTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LeakyRelu(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeakyRelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LeakyRelu_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeakyRelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cselfIsResult := int32(0)
if selfIsResult {
cselfIsResult = int32(1)
}
lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cselfIsResult := int32(0)
if selfIsResult {
cselfIsResult = int32(1)
}
lib.AtgLeakyReluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LeakyReluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Lerp(end *Tensor, weight *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Lerp_(end *Tensor, weight *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpScalarOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LerpTensor(end *Tensor, weight *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpTensor(ptr, ts.ctensor, end.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LerpTensor_(end *Tensor, weight *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpTensor_(ptr, ts.ctensor, end.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLerpTensorOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Less(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLess(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Less_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLess_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LessEqual(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqual(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LessEqual_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqual_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LessEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LessEqualTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LessEqualTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LessEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LessScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LessTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LessTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LessTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLessTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Lgamma(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLgamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Lgamma_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLgamma_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LgammaOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgCholesky(upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgLinalgCholesky(ptr, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
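// LinalgCholeskyEx mirrors torch.linalg.cholesky_ex: it returns the factor
// together with an info tensor of LAPACK status codes instead of raising,
// unless checkErrors is set.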
func (ts *Tensor) LinalgCholeskyEx(upper bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
ccheckErrors := int32(0)
if checkErrors {
ccheckErrors = int32(1)
}
lib.AtgLinalgCholeskyEx(ctensorPtr0, ts.ctensor, cupper, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
ccheckErrors := int32(0)
if checkErrors {
ccheckErrors = int32(1)
}
lib.AtgLinalgCholeskyExL(ctensorPtr0, l.ctensor, info.ctensor, ts.ctensor, cupper, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgCholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgLinalgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgCond(p *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCond(ptr, ts.ctensor, p.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgCondOut(out *Tensor, p *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCondOut(ptr, out.ctensor, ts.ctensor, p.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgCondPStr(p string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCondPStr(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgCondPStrOut(out *Tensor, p string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCondPStrOut(ptr, out.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgCross(other *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCross(ptr, ts.ctensor, other.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgCrossOut(out *Tensor, other *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgDet(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgDet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgDetOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgDetOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgDiagonal(a *Tensor, offset int64, dim1 int64, dim2 int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgDiagonal(ptr, a.ctensor, offset, dim1, dim2)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgEig(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEig(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEigOut(ctensorPtr0, eigenvalues.ctensor, eigenvectors.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgEigh(uPLO string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEigh(ctensorPtr0, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgEighEigvals(ctensorPtr0, eigvals.ctensor, eigvecs.ctensor, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgEigvals(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvals(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgEigvalsOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvalsOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgEigvalsh(uPLO string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvalsh(ptr, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgEigvalshOut(out *Tensor, uPLO string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgEigvalshOut(ptr, out.ctensor, ts.ctensor, uPLO)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgHouseholderProduct(input *Tensor, tau *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgHouseholderProduct(ptr, input.ctensor, tau.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgHouseholderProductOut(ptr, out.ctensor, input.ctensor, tau.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgInv(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgInv(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgInvEx(checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ccheckErrors := int32(0)
if checkErrors {
ccheckErrors = int32(1)
}
lib.AtgLinalgInvEx(ctensorPtr0, ts.ctensor, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgInvExInverse(inverse *Tensor, info *Tensor, checkErrors bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ccheckErrors := int32(0)
if checkErrors {
ccheckErrors = int32(1)
}
lib.AtgLinalgInvExInverse(ctensorPtr0, inverse.ctensor, info.ctensor, ts.ctensor, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgInvOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgInvOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
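// LinalgLstsq mirrors torch.linalg.lstsq and returns the 4-tuple (solution,
// residuals, rank, singularValues). rcond uses the nullable-slice convention;
// driver selects the LAPACK backend ("gels", "gelsy", "gelsd" or "gelss").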
func (ts *Tensor) LinalgLstsq(b *Tensor, rcond []float64, driver string, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
var crcondVal float64 = 0.0
var crcondNull int = 1
if len(rcond) > 0 {
crcondVal = rcond[0]
crcondNull = 0
}
lib.AtgLinalgLstsq(ctensorPtr0, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
func (ts *Tensor) LinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
var crcondVal float64 = 0.0
var crcondNull int = 1
if len(rcond) > 0 {
crcondVal = rcond[0]
crcondNull = 0
}
lib.AtgLinalgLstsqOut(ctensorPtr0, solution.ctensor, residuals.ctensor, rank.ctensor, singularValues.ctensor, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
return retVal0, retVal1, retVal2, retVal3, err
}
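// LinalgLuFactor mirrors torch.linalg.lu_factor, returning (LU, pivots);
// pivot=false requests the no-pivoting variant, which libtorch supports only
// on certain backends (e.g. CUDA).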
func LinalgLuFactor(a *Tensor, pivot bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot {
cpivot = int32(1)
}
lib.AtgLinalgLuFactor(ctensorPtr0, a.ctensor, cpivot)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func LinalgLuFactorEx(a *Tensor, pivot bool, checkErrors bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot {
cpivot = int32(1)
}
ccheckErrors := int32(0)
if checkErrors {
ccheckErrors = int32(1)
}
lib.AtgLinalgLuFactorEx(ctensorPtr0, a.ctensor, cpivot, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func LinalgLuFactorExOut(lU *Tensor, pivots *Tensor, info *Tensor, a *Tensor, pivot bool, checkErrors bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot {
cpivot = int32(1)
}
ccheckErrors := int32(0)
if checkErrors {
ccheckErrors = int32(1)
}
lib.AtgLinalgLuFactorExOut(ctensorPtr0, lU.ctensor, pivots.ctensor, info.ctensor, a.ctensor, cpivot, ccheckErrors)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func LinalgLuFactorOut(lU *Tensor, pivots *Tensor, a *Tensor, pivot bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cpivot := int32(0)
if pivot {
cpivot = int32(1)
}
lib.AtgLinalgLuFactorOut(ctensorPtr0, lU.ctensor, pivots.ctensor, a.ctensor, cpivot)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgMatmul(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgMatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgMatrixExp(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatrixExp(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgMatrixPower(n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatrixPower(ptr, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgMatrixPowerOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
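// LinalgMatrixRank wraps ATen's linalg_matrix_rank. In the AtolRtol*
// variants the []float64 atol/rtol parameters encode optional scalars: an
// empty slice means "unset" (the matching *Null flag is sent to C as 1)
// and only the first element of a non-empty slice is read. hermitian=true
// lets libtorch assume a Hermitian input and use its eigenvalue-based path
// instead of an SVD.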
func (ts *Tensor) LinalgMatrixRank(tol float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRank(ptr, ts.ctensor, tol, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgMatrixRankAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRankAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgMatrixRankAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRankAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgMatrixRankAtolRtolTensor(input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRankAtolRtolTensor(ptr, input.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgMatrixRankAtolRtolTensorOut(out *Tensor, input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRankAtolRtolTensorOut(ptr, out.ctensor, input.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgMatrixRankOut(out *Tensor, tol float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRankOut(ptr, out.ctensor, ts.ctensor, tol, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRankOutTolTensor(ptr, out.ctensor, input.ctensor, tol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgMatrixRankTolTensor(ptr, input.ctensor, tol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
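// LinalgMultiDot wraps ATen's linalg_multi_dot, chaining the matrix
// products of `tensors` in an association order chosen upstream to
// minimise the cost of the intermediates.
//
// Example (sketch, assuming a, b and c are conformable matrices):
//
//	res, err := LinalgMultiDot([]Tensor{*a, *b, *c})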
func LinalgMultiDot(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgLinalgMultiDot(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgMultiDotOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgLinalgMultiDotOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
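// LinalgNorm computes torch.linalg.norm with a scalar order; the OrdStr
// variants accept a string order such as "fro" or "nuc". `dim` lists the
// dimensions to reduce over and keepdim=true retains them as size-1 axes
// in the result.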
func (ts *Tensor) LinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgLinalgNorm(ptr, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgLinalgNormOrdStr(ptr, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgLinalgNormOrdStrOut(ptr, out.ctensor, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgLinalgNormOut(ptr, out.ctensor, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
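// LinalgPinv wraps ATen's linalg_pinv (Moore-Penrose pseudo-inverse).
// rcond, or the optional atol/rtol pair in the AtolRtol* variants, sets
// the singular-value cutoff; hermitian=true selects the cheaper
// eigendecomposition-based path for Hermitian inputs.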
func (ts *Tensor) LinalgPinv(rcond float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinv(ptr, ts.ctensor, rcond, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgPinvAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinvAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgPinvAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var catolVal float64 = 0.0
var catolNull int = 1
if len(atol) > 0 {
catolVal = atol[0]
catolNull = 0
}
var crtolVal float64 = 0.0
var crtolNull int = 1
if len(rtol) > 0 {
crtolVal = rtol[0]
crtolNull = 0
}
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinvAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgPinvAtolRtolTensor(atol *Tensor, rtol *Tensor, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinvAtolRtolTensor(ptr, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgPinvAtolRtolTensorOut(out *Tensor, atol *Tensor, rtol *Tensor, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinvAtolRtolTensorOut(ptr, out.ctensor, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinvOut(ptr, out.ctensor, ts.ctensor, rcond, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinvOutRcondTensor(ptr, out.ctensor, ts.ctensor, rcond.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
chermitian := int32(0)
if hermitian {
chermitian = int32(1)
}
lib.AtgLinalgPinvRcondTensor(ptr, ts.ctensor, rcond.ctensor, chermitian)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
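// LinalgQr wraps ATen's linalg_qr. `mode` is one of "reduced", "complete"
// or "r", selecting the shapes of the returned Q and R factors.
//
// Example (sketch, assuming x is a valid matrix):
//
//	q, r, err := x.LinalgQr("reduced", false)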
func (ts *Tensor) LinalgQr(mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgQr(ctensorPtr0, ts.ctensor, mode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgQrOut(q *Tensor, r *Tensor, mode string, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgQrOut(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, mode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
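// LinalgSlogdet wraps ATen's linalg_slogdet, returning the sign and the
// log of the absolute value of the determinant; prefer it to Logdet when
// the determinant itself would under- or overflow.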
func (ts *Tensor) LinalgSlogdet(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgSlogdet(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLinalgSlogdetOut(ctensorPtr0, sign.ctensor, logabsdet.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func LinalgSolve(input *Tensor, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgSolve(ptr, input.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgSolveOut(out *Tensor, input *Tensor, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgSolveOut(ptr, out.ctensor, input.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
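// LinalgSolveTriangular wraps ATen's linalg_solve_triangular. upper picks
// which triangle of the receiver is used, left chooses between solving
// A X = B (true) and X A = B (false), and unitriangular=true treats the
// diagonal as all ones without reading it.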
func (ts *Tensor) LinalgSolveTriangular(b *Tensor, upper bool, left bool, unitriangular bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
cleft := int32(0)
if left {
cleft = int32(1)
}
cunitriangular := int32(0)
if unitriangular {
cunitriangular = int32(1)
}
lib.AtgLinalgSolveTriangular(ptr, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgSolveTriangularOut(out *Tensor, b *Tensor, upper bool, left bool, unitriangular bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
cleft := int32(0)
if left {
cleft = int32(1)
}
cunitriangular := int32(0)
if unitriangular {
cunitriangular = int32(1)
}
lib.AtgLinalgSolveTriangularOut(ptr, out.ctensor, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
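// LinalgSvd wraps ATen's linalg_svd and returns U, S and Vh (the conjugate
// transpose of V). fullMatrices=false yields the reduced decomposition;
// LinalgSvdvals below returns the singular values only.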
func LinalgSvd(a *Tensor, fullMatrices bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cfullMatrices := int32(0)
if fullMatrices {
cfullMatrices = int32(1)
}
lib.AtgLinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cfullMatrices := int32(0)
if fullMatrices {
cfullMatrices = int32(1)
}
lib.AtgLinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func LinalgSvdvals(a *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgSvdvals(ptr, a.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinalgSvdvalsOut(out *Tensor, a *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgSvdvalsOut(ptr, out.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgTensorinv(ind int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgTensorinv(ptr, ts.ctensor, ind)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgTensorinvOut(out *Tensor, ind int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgTensorinvOut(ptr, out.ctensor, ts.ctensor, ind)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgTensorsolve(other *Tensor, dims []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgTensorsolve(ptr, ts.ctensor, other.ctensor, dims, len(dims))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinalgTensorsolveOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dims, len(dims))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
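// Linear wraps ATen's linear, the functional form behind torch.nn.Linear:
// it computes input times the transpose of weight, plus bias.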
func Linear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinearOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
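// Linspace wraps ATen's linspace: `steps` values evenly spaced from start
// to end inclusive, created with the given dtype and device. Logspace
// further below is its exponential counterpart.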
func Linspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
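// Log wraps the ATen element-wise natural logarithm; Log10, Log1p and Log2
// follow. As everywhere in this file, the trailing-underscore forms (Log_,
// Log10_, ...) are in-place: they mutate the receiver's underlying C
// tensor and return only an error.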
func (ts *Tensor) Log(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Log10(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog10(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Log10_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog10_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Log10Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Log1p(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog1p(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Log1p_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog1p_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Log1pOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Log2(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog2(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Log2_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog2_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Log2Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Log_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLog_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LogNormal_(mean float64, std float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogNormal_(ptr, ts.ctensor, mean, std)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LogOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogSigmoid(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoid(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogSigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
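// Logaddexp computes log(exp(self) + exp(other)) without intermediate
// overflow; Logaddexp2 is the base-2 analogue, and Logsumexp further below
// reduces over `dim` instead of pairing two tensors.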
func (ts *Tensor) Logaddexp(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexp(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Logaddexp2(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexp2(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Logaddexp2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexp2Out(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogaddexpOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogaddexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Logcumsumexp(dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogcumsumexp(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Logdet(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogdet(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalAnd(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalAnd_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LogicalAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalNot(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalNot(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalNot_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalNot_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LogicalNotOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalOr(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalOr_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LogicalOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalXor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogicalXor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LogicalXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
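// Logit wraps ATen's logit, the inverse of the sigmoid. `eps` is an
// optional scalar encoded as a 0- or 1-element slice: when set, the input
// is clamped to [eps, 1-eps] before the log-odds are taken.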
func (ts *Tensor) Logit(eps []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogit(ptr, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Logit_(eps []float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogit_(ptr, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LogitBackward(gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogitBackward(ptr, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogitBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogitOut(out *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Logspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Logsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
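// Lstm wraps ATen's lstm over a padded batch. hx packs the initial hidden
// and cell states as a two-element slice, params carries the flattened
// weight and bias tensors for every layer, and the returned triple is
// (output, h_n, c_n). LstmData below is the packed-sequence form driven by
// batchSizes.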
func Lstm(input *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {
chx = append(chx, t.ctensor)
}
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.AtgLstm(ctensorPtr0, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func LstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {
chx = append(chx, t.ctensor)
}
lib.AtgLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func LstmData(data *Tensor, batchSizes *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {
chx = append(chx, t.ctensor)
}
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
lib.AtgLstmData(ctensorPtr0, data.ctensor, batchSizes.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
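// Lstsq wraps the legacy ATen lstsq op, deprecated upstream in favour of
// linalg_lstsq (see LinalgLstsqOut above); it is kept because it is still
// generated from the libtorch declarations this binding targets.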
func (ts *Tensor) Lstsq(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLstsq(ctensorPtr0, ts.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) LstsqX(x *Tensor, qr *Tensor, a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgLstsqX(ctensorPtr0, x.ctensor, qr.ctensor, ts.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Lt(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLt(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Lt_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLt_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LtScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LtTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LtTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) LtTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LuSolve(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) LuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
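// LuUnpack wraps ATen's lu_unpack, expanding packed LU data and pivots
// (as produced by the LU factorizations above) into explicit P, L and U
// tensors; unpackData/unpackPivots skip the corresponding outputs when
// false.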
func LuUnpack(lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cunpackData := int32(0)
if unpackData {
cunpackData = int32(1)
}
cunpackPivots := int32(0)
if unpackPivots {
cunpackPivots = int32(1)
}
lib.AtgLuUnpack(ctensorPtr0, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func LuUnpackOut(p *Tensor, l *Tensor, u *Tensor, lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
cunpackData := int32(0)
if unpackData {
cunpackData = int32(1)
}
cunpackPivots := int32(0)
if unpackPivots {
cunpackPivots = int32(1)
}
lib.AtgLuUnpackOut(ctensorPtr0, p.ctensor, l.ctensor, u.ctensor, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func MarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
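// MaskedFill, MaskedScatter and MaskedSelect wrap the ATen masked_* ops.
// `mask` is a boolean tensor broadcastable against the receiver: Fill
// writes a constant where the mask is true, Scatter copies elements from
// `source`, and Select gathers the masked elements into a 1-D result.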
func (ts *Tensor) MaskedFill(mask *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaskedFill_(mask *Tensor, value *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MaskedFillTensor(mask *Tensor, value *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFillTensor(ptr, ts.ctensor, mask.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaskedFillTensor_(mask *Tensor, value *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedFillTensor_(ptr, ts.ctensor, mask.ctensor, value.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MaskedScatter(mask *Tensor, source *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaskedScatter_(mask *Tensor, source *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MaskedSelect(mask *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func MaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedSelectBackward(ptr, grad.ctensor, input.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaskedSelectOut(out *Tensor, mask *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Matmul(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatmul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatrixExp(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixExp(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatrixExpBackward(grad *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixExpBackward(ptr, ts.ctensor, grad.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatrixH(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixH(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatrixPower(n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixPower(ptr, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatrixPowerOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatrixRank(symmetric bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csymmetric := int32(0)
if symmetric {
csymmetric = int32(1)
}
lib.AtgMatrixRank(ptr, ts.ctensor, csymmetric)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MatrixRankTol(tol float64, symmetric bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
csymmetric := int32(0)
if symmetric {
csymmetric = int32(1)
}
lib.AtgMatrixRankTol(ptr, ts.ctensor, tol, csymmetric)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
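// Max reduces to the maximum over all elements. MaxDim reduces along `dim`
// and also returns the argmax indices, while MaxOther is the element-wise
// maximum of two tensors.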
func (ts *Tensor) Max(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMax(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMaxDimMax(ctensorPtr0, max.ctensor, maxValues.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MaxOther(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxOther(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
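// MaxPool1d, MaxPool2d and MaxPool3d wrap the ATen max-pooling ops.
// kernelSize, stride, padding and dilation are int64 slices whose lengths
// are forwarded to C; ceilMode=true rounds output sizes up rather than
// down. The WithIndices variants also return the argmax indices that the
// matching Backward functions consume.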
func (ts *Tensor) MaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxPool1dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool1dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxPool2dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool2dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxPool2dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool2dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxPool2dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool2dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxPool3dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool3dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxPool3dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool3dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxPool3dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMaxPool3dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
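// NOTE (hand-added sketch, not generated): MaxUnpool2d scatters pooled
// values back to the positions recorded by MaxPool2dWithIndices; outputSize
// is the spatial (H, W) extent to restore. The 4x4 size here is an assumed
// example of the pre-pooling input extent.
func exampleMaxUnpool2dRoundTrip(x *Tensor) (*Tensor, error) {
	pooled, indices, err := x.MaxPool2dWithIndices([]int64{2, 2}, []int64{2, 2}, []int64{0, 0}, []int64{1, 1}, false, false)
	if err != nil {
		return nil, err
	}
	defer pooled.MustDrop()
	defer indices.MustDrop()
	return pooled.MaxUnpool2d(indices, []int64{4, 4}, false)
}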
func (ts *Tensor) MaxUnpool2d(indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxUnpool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxUnpool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Maximum(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaximum(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MaximumOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMaximumOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
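// NOTE (hand-added sketch, not generated): Mean reduces over all elements,
// MeanDim over the listed dimensions (keepdim retains them as size-1 dims).
// gotch.Float is assumed here as the accumulation/output dtype.
func exampleMeanDimUsage(x *Tensor) (*Tensor, error) {
	// mean over the last dimension, kept as a size-1 dim
	return x.MeanDim([]int64{-1}, true, gotch.Float, false)
}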
func (ts *Tensor) Mean(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMean(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMeanDim(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Median(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMedian(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MedianDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Mh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Min(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
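// NOTE (hand-added sketch, not generated): MinDim returns the per-dimension
// minima and their indices as two tensors; the caller owns and must drop both.
func exampleMinDimUsage(x *Tensor) error {
	vals, idx, err := x.MinDim(0, false, false)
	if err != nil {
		return err
	}
	vals.MustDrop()
	idx.MustDrop()
	return nil
}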
func (ts *Tensor) MinDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMinDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMinDimMin(ctensorPtr0, min.ctensor, minIndices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MinOther(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinOther(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MinOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Minimum(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinimum(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MinimumOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMinimumOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func MiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgMiopenBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func MiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgMiopenBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) MiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cbenchmark := int32(0)
if benchmark {
cbenchmark = int32(1)
}
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cbenchmark := int32(0)
if benchmark {
cbenchmark = int32(1)
}
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cbenchmark := int32(0)
if benchmark {
cbenchmark = int32(1)
}
cdeterministic := int32(0)
if deterministic {
cdeterministic = int32(1)
}
lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func MiopenRnn(input *Tensor, weight []Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr4 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr3)) + unsafe.Sizeof(ctensorPtr0)))
var cweight []lib.Ctensor
for _, t := range weight {
cweight = append(cweight, t.ctensor)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
lib.AtgMiopenRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, hx.ctensor, cx.ctensor, mode, hiddenSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, len(batchSizes), dropoutState.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
retVal3 = newTensor(*ctensorPtr3)
retVal4 = newTensor(*ctensorPtr4)
return retVal0, retVal1, retVal2, retVal3, retVal4, err
}
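// NOTE (hand-added sketch, not generated): Mish is the activation
// x * tanh(softplus(x)); Mish_ is the in-place variant that swaps the
// receiver's underlying C tensor.
func exampleMishUsage(x *Tensor) (*Tensor, error) {
	return x.Mish(false) // del=false: x stays valid for the caller
}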
func (ts *Tensor) Mish(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMish(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Mish_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMish_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MishBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMishBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MishOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMishOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMkldnnAdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MkldnnLinear(weight *Tensor, bias *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMkldnnLinear(ptr, ts.ctensor, weight.ctensor, bias.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func MkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMkldnnLinearBackwardInput(ptr, inputSize, len(inputSize), gradOutput.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func MkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cbiasDefined := int32(0)
if biasDefined {
cbiasDefined = int32(1)
}
lib.AtgMkldnnLinearBackwardWeights(ctensorPtr0, gradOutput.ctensor, input.ctensor, weight.ctensor, cbiasDefined)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func MkldnnMaxPool2dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMkldnnMaxPool2dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMkldnnMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func MkldnnMaxPool3dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgMkldnnMaxPool3dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMkldnnReorderConv2dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMkldnnReorderConv3dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
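// NOTE (hand-added sketch, not generated): Mm is a plain 2-D matrix
// product: an (n,m) receiver times an (m,p) mat2 yields (n,p). MmOut writes
// into a preallocated out tensor instead of allocating a new one.
func exampleMmUsage(a, b *Tensor) (*Tensor, error) {
	return a.Mm(b, false)
}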
func (ts *Tensor) Mm(mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMm(ptr, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Mode(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgMode(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) ModeValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgModeValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Moveaxis(source []int64, destination []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMoveaxis(ptr, ts.ctensor, source, len(source), destination, len(destination))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MoveaxisInt(source int64, destination int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMoveaxisInt(ptr, ts.ctensor, source, destination)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Movedim(source []int64, destination []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMovedim(ptr, ts.ctensor, source, len(source), destination, len(destination))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MovedimInt(source int64, destination int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMovedimInt(ptr, ts.ctensor, source, destination)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
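// NOTE (hand-added sketch, not generated): the reduction argument is
// assumed to follow ATen's Reduction enum (0 = none, 1 = mean, 2 = sum).
func exampleMseLossUsage(pred, target *Tensor) (*Tensor, error) {
	return pred.MseLoss(target, 1, false) // 1: mean reduction (assumed)
}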
func (ts *Tensor) MseLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMseLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MseLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Msort(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMsort(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MsortOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMsortOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Mt(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMt(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
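// NOTE (hand-added sketch, not generated): Mul is elementwise
// (broadcasting) multiply, Mul_ mutates the receiver, and MulScalar takes a
// *Scalar that the caller constructs and owns.
func exampleMulScalarUsage(x *Tensor, two *Scalar) (*Tensor, error) {
	return x.MulScalar(two, false)
}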
func (ts *Tensor) Mul(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMul(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Mul_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMul_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MulScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMulScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MulScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMulScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultilabelMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
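// NOTE (hand-added sketch, not generated): Multinomial draws numSamples
// category indices using the receiver's last dimension as unnormalized
// weights; replacement toggles sampling with replacement.
func exampleMultinomialUsage(weights *Tensor) (*Tensor, error) {
	return weights.Multinomial(4, true, false)
}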
func (ts *Tensor) Multinomial(numSamples int64, replacement bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
creplacement := int32(0)
if replacement {
creplacement = int32(1)
}
lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
creplacement := int32(0)
if replacement {
creplacement = int32(1)
}
lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Multiply(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiply(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Multiply_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiply_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MultiplyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiplyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultiplyScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiplyScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MultiplyScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMultiplyScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Mv(vec *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMv(ptr, ts.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) MvOut(out *Tensor, vec *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Mvlgamma(p int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvlgamma(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Mvlgamma_(p int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvlgamma_(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) MvlgammaOut(out *Tensor, p int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgMvlgammaOut(ptr, out.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
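// NOTE (hand-added sketch, not generated): in NanToNum and the wrappers
// below, a []float64 parameter encodes an optional double: an empty slice
// means "use the library default" (the *Null flags), while a one-element
// slice supplies the value. The clamp bounds here are assumed examples.
func exampleNanToNumUsage(x *Tensor) (*Tensor, error) {
	// NaN -> 0, +Inf -> 1e6, -Inf -> -1e6
	return x.NanToNum([]float64{0.0}, []float64{1e6}, []float64{-1e6}, false)
}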
func (ts *Tensor) NanToNum(nan []float64, posinf []float64, neginf []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnanVal float64 = 0.0
var cnanNull int = 1
if len(nan) > 0 {
cnanVal = nan[0]
cnanNull = 0
}
var cposinfVal float64 = 0.0
var cposinfNull int = 1
if len(posinf) > 0 {
cposinfVal = posinf[0]
cposinfNull = 0
}
var cneginfVal float64 = 0.0
var cneginfNull int = 1
if len(neginf) > 0 {
cneginfVal = neginf[0]
cneginfNull = 0
}
lib.AtgNanToNum(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NanToNum_(nan []float64, posinf []float64, neginf []float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnanVal float64 = 0.0
var cnanNull int = 1
if len(nan) > 0 {
cnanVal = nan[0]
cnanNull = 0
}
var cposinfVal float64 = 0.0
var cposinfNull int = 1
if len(posinf) > 0 {
cposinfVal = posinf[0]
cposinfNull = 0
}
var cneginfVal float64 = 0.0
var cneginfNull int = 1
if len(neginf) > 0 {
cneginfVal = neginf[0]
cneginfNull = 0
}
lib.AtgNanToNum_(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NanToNumOut(out *Tensor, nan []float64, posinf []float64, neginf []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnanVal float64 = 0.0
var cnanNull int = 1
if len(nan) > 0 {
cnanVal = nan[0]
cnanNull = 0
}
var cposinfVal float64 = 0.0
var cposinfNull int = 1
if len(posinf) > 0 {
cposinfVal = posinf[0]
cposinfNull = 0
}
var cneginfVal float64 = 0.0
var cneginfNull int = 1
if len(neginf) > 0 {
cneginfVal = neginf[0]
cneginfNull = 0
}
lib.AtgNanToNumOut(ptr, out.ctensor, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Nanmean(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanmean(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NanmeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanmeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Nanmedian(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNanmedian(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NanmedianDim(dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanmedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) NanmedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanmedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Nanquantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanquantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NanquantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanquantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NanquantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanquantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NanquantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNanquantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Nansum(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNansum(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NansumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNansumDimIntlist(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NansumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNansumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
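// NOTE (hand-added sketch, not generated): Narrow returns a view of
// `length` elements along dim starting at start; NarrowCopy materializes a
// copy instead. E.g. rows 2..4 of a matrix:
func exampleNarrowUsage(x *Tensor) (*Tensor, error) {
	return x.Narrow(0, 2, 3, false)
}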
func (ts *Tensor) Narrow(dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrow(ptr, ts.ctensor, dim, start, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NarrowCopy(dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NarrowCopyOut(out *Tensor, dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrowCopyOut(ptr, out.ctensor, ts.ctensor, dim, start, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NarrowTensor(dim int64, start *Tensor, length int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNarrowTensor(ptr, ts.ctensor, dim, start.ctensor, length)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
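// NOTE (hand-added sketch, not generated): NativeBatchNorm returns
// (output, saveMean, saveInvstd); with training=true the running stats are
// assumed to be updated in place using momentum. The caller must drop all
// three results; this sketch keeps only the output.
func exampleNativeBatchNormUsage(input, weight, bias, runMean, runVar *Tensor) (*Tensor, error) {
	out, saveMean, saveInvstd, err := NativeBatchNorm(input, weight, bias, runMean, runVar, true, 0.1, 1e-5)
	if err != nil {
		return nil, err
	}
	saveMean.MustDrop()
	saveInvstd.MustDrop()
	return out, nil
}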
func NativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgNativeBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func NativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgNativeBatchNormOut(ctensorPtr0, out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) NativeChannelShuffle(groups int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeChannelShuffle(ptr, ts.ctensor, groups)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
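// NOTE (hand-added sketch, not generated): NativeDropout returns the
// dropped-out tensor together with the mask that was applied; with
// train=false it is effectively a pass-through.
func exampleNativeDropoutUsage(x *Tensor) (*Tensor, *Tensor, error) {
	return NativeDropout(x, 0.5, true)
}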
func NativeDropout(input *Tensor, p float64, train bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctrain := int32(0)
if train {
ctrain = int32(1)
}
lib.AtgNativeDropout(ctensorPtr0, input.ctensor, p, ctrain)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func NativeDropoutBackward(gradOutput *Tensor, mask *Tensor, scale float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeDropoutBackward(ptr, gradOutput.ctensor, mask.ctensor, scale)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func NativeGroupNorm(input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgNativeGroupNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, n, c, hxW, group, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func NativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgNativeLayerNorm(ctensorPtr0, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) NativeNorm(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNativeNorm(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NativeNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNativeNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
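// NOTE (hand-added sketch, not generated): Ne compares the receiver against
// a scalar and returns a boolean mask; NeTensor is the elementwise
// tensor-tensor variant and Ne_ mutates in place.
func exampleNeUsage(x *Tensor, zero *Scalar) (*Tensor, error) {
	return x.Ne(zero, false) // mask of elements != zero
}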
func (ts *Tensor) Ne(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNe(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ne_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNe_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NeScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NeTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NeTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NeTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Neg(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeg(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Neg_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNeg_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NegOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Negative(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegative(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Negative_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegative_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NegativeOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNegativeOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
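// NewEmpty wraps ATen's new_empty: it allocates a tensor of the given size,
// dtype and device without initializing its memory, so the contents are
// arbitrary until written. A minimal sketch, assuming x is a valid *Tensor
// used only as a template for allocation:
//
//	buf, err := x.NewEmpty([]int64{2, 3}, gotch.Float, gotch.CPU, false)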
func (ts *Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNewEmpty(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNewEmptyStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNewFull(ptr, ts.ctensor, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNewOnes(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNewZeros(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Nextafter(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNextafter(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Nextafter_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNextafter_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NextafterOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNextafterOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
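// NllLoss wraps ATen's nll_loss. The receiver is expected to hold
// log-probabilities (e.g. the output of a log-softmax), target holds class
// indices, weight may rescale each class, reduction selects ATen's
// none/mean/sum as 0/1/2, and ignoreIndex marks a target value to skip
// (-100 is the usual ATen default). A minimal sketch, assuming logProbs,
// target and weight are valid tensors:
//
//	loss, err := logProbs.NllLoss(target, weight, 1, -100, false)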
func (ts *Tensor) NllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLossNd(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossNd(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Nonzero(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNonzero(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NonzeroOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
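// Norm wraps the default overload of ATen's norm and returns the 2-norm
// (Frobenius norm) of the flattened tensor as a zero-dimensional tensor;
// the variants below add p, dim, keepdim and dtype. A minimal sketch,
// assuming x is a valid *Tensor:
//
//	n, err := x.Norm(false)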
func (ts *Tensor) Norm(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNorm(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNormDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func NormExceptDim(v *Tensor, pow int64, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNormScalaroptDim(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormScalaroptDtype(ptr, ts.ctensor, p.cscalar, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Normal(out *Tensor, mean *Tensor, std float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormal(ptr, out.ctensor, mean.ctensor, std)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Normal_(mean float64, std float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormal_(ptr, ts.ctensor, mean, std)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func NormalFloatFloatOut(out *Tensor, mean float64, std float64, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormalFloatFloatOut(ptr, out.ctensor, mean, std, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func NormalFloatTensorOut(out *Tensor, mean float64, std *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormalFloatTensorOut(ptr, out.ctensor, mean, std.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func NormalTensorTensorOut(out *Tensor, mean *Tensor, std *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNormalTensorTensorOut(ptr, out.ctensor, mean.ctensor, std.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NotEqual(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqual(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NotEqual_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqual_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NotEqualScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NotEqualTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NotEqualTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) NotEqualTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNotEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NuclearNorm(keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NuclearNormDim(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNuclearNormDim(ptr, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNuclearNormDimOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NuclearNormOut(out *Tensor, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) NumpyT(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgNumpyT(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
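// OneHot wraps ATen's one_hot: the receiver must hold non-negative integer
// class indices, and the result appends a trailing dimension of length
// numClasses with a 1 at each index position and 0 elsewhere. A minimal
// sketch, assuming idx is a valid int64 tensor of labels in [0, 10):
//
//	enc, err := idx.OneHot(10, false)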
func (ts *Tensor) OneHot(numClasses int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOneHot(ptr, ts.ctensor, numClasses)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOnes(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) OnesLike(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOnesLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func OnesOut(out *Tensor, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOnesOut(ptr, out.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Orgqr(input2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) OrgqrOut(out *Tensor, input2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ormqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left {
cleft = int32(1)
}
ctranspose := int32(0)
if transpose {
ctranspose = int32(1)
}
lib.AtgOrmqr(ptr, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) OrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cleft := int32(0)
if left {
cleft = int32(1)
}
ctranspose := int32(0)
if transpose {
ctranspose = int32(1)
}
lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Outer(vec2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOuter(ptr, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) OuterOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgOuterOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) OutputNr(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgOutputNr(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
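// PadSequence wraps ATen's pad_sequence: it stacks variable-length tensors
// along a new batch dimension, padding the shorter ones with paddingValue.
// With batchFirst the result is laid out (batch, maxLen, ...), otherwise
// (maxLen, batch, ...). A minimal sketch, assuming seqs is a non-empty
// []Tensor whose elements agree in their trailing dimensions:
//
//	padded, err := PadSequence(seqs, true, 0.0)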
func PadSequence(sequences []Tensor, batchFirst bool, paddingValue float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var csequences []lib.Ctensor
for _, t := range sequences {
csequences = append(csequences, t.ctensor)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.AtgPadSequence(ptr, csequences, len(csequences), cbatchFirst, paddingValue)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func PairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Pdist(p float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPdist(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
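// Permute wraps ATen's permute and returns a view of the tensor with its
// dimensions reordered; dims must list every axis exactly once. A minimal
// sketch, assuming x is a valid 3-d tensor:
//
//	y, err := x.Permute([]int64{1, 2, 0}, false)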
func (ts *Tensor) Permute(dims []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPermute(ptr, ts.ctensor, dims, len(dims))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) PinMemory(device gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPinMemory(ptr, ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Pinverse(rcond float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPinverse(ptr, ts.ctensor, rcond)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) PixelShuffle(upscaleFactor int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) PixelUnshuffle(downscaleFactor int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPixelUnshuffle(ptr, ts.ctensor, downscaleFactor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Poisson(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPoisson(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func PoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
clogInput := int32(0)
if logInput {
clogInput = int32(1)
}
cfull := int32(0)
if full {
cfull = int32(1)
}
lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Polar(abs *Tensor, angle *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolar(ptr, abs.ctensor, angle.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func PolarOut(out *Tensor, abs *Tensor, angle *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolarOut(ptr, out.ctensor, abs.ctensor, angle.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Polygamma(n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolygamma(ptr, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Polygamma_(n int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolygamma_(ptr, ts.ctensor, n)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) PolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Positive(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPositive(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
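// Pow wraps ATen's pow.Tensor_Tensor: it raises each element of the
// receiver to the corresponding element of exponent under the usual
// broadcasting rules; the scalar-base and scalar-exponent variants follow
// below. A minimal sketch, assuming x and e are valid, broadcast-compatible
// tensors:
//
//	y, err := x.Pow(e, false)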
func (ts *Tensor) Pow(exponent *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPow(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Pow_(exponent *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func PowScalar(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowScalar(ptr, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func PowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) PowTensor_(exponent *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensor_(ptr, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) PowTensorScalar(exponent *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensorScalar(ptr, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) PowTensorScalarOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) PowTensorTensorOut(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPowTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Prelu(weight *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) PreluBackward(gradOutput *Tensor, weight *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgPreluBackward(ctensorPtr0, gradOutput.ctensor, ts.ctensor, weight.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Prod(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgProd(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgProdDimInt(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgProdIntOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Put(index *Tensor, source *Tensor, accumulate bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
caccumulate := int32(0)
if accumulate {
caccumulate = int32(1)
}
lib.AtgPut(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Put_(index *Tensor, source *Tensor, accumulate bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
caccumulate := int32(0)
if accumulate {
caccumulate = int32(1)
}
lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) QPerChannelAxis(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgQPerChannelAxis(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) QPerChannelScales(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQPerChannelScales(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QPerChannelZeroPoints(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QScale(del bool) (retVal float64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgQScale(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) QZeroPoint(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgQZeroPoint(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
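// Qr wraps ATen's qr and returns factors with ts = Q matmul R; with
// some = true the reduced factorization is computed, otherwise the complete
// one (hedged: this mirrors the deprecated torch.qr semantics that the
// binding name suggests). A minimal sketch, assuming m is a valid 2-d
// tensor:
//
//	q, r, err := m.Qr(true, false)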
func (ts *Tensor) Qr(some bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some {
csome = int32(1)
}
lib.AtgQr(ctensorPtr0, ts.ctensor, csome)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) QrQ(q *Tensor, r *Tensor, some bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some {
csome = int32(1)
}
lib.AtgQrQ(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, csome)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
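// Quantile wraps ATen's quantile. The dim slice encodes ATen's optional
// int64 argument: an empty or nil slice means "no dim" (the input is
// flattened first), while dim[0] selects the axis to reduce; the same
// encoding is used for the other optional-int parameters in this file.
// interpolation is one of "linear", "lower", "higher", "nearest" or
// "midpoint". A minimal sketch, assuming x and q are valid tensors:
//
//	med, err := x.Quantile(q, nil, false, "linear", false)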
func (ts *Tensor) Quantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgQuantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgQuantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgQuantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgQuantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantizePerTensorDynamic(dtype gotch.DType, reduceRange bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
creduceRange := int32(0)
if reduceRange {
creduceRange = int32(1)
}
lib.AtgQuantizePerTensorDynamic(ptr, ts.ctensor, dtype.CInt(), creduceRange)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizePerTensorTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func QuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func QuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func QuantizedLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var chx []lib.Ctensor
for _, t := range hx {
chx = append(chx, t.ctensor)
}
lib.AtgQuantizedLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) QuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgQuantizedMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cceilMode := int32(0)
if ceilMode {
cceilMode = int32(1)
}
lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func QuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func QuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rad2deg(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRad2deg(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rad2deg_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRad2deg_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Rad2degOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRad2degOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
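// Rand wraps ATen's rand and returns a tensor of the given size filled with
// uniform samples from [0, 1). A minimal sketch:
//
//	u, err := Rand([]int64{2, 3}, gotch.Float, gotch.CPU)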
func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRand(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RandLike(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RandOut(out *Tensor, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandOut(ptr, out.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandint(ptr, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RandintLike(high int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLike(ptr, ts.ctensor, high)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RandintLikeLowDtype(low int64, high int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLikeLowDtype(ptr, ts.ctensor, low, high)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLow(ptr, low, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RandintLowOut(out *Tensor, low int64, high int64, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintLowOut(ptr, out.ctensor, low, high, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RandintOut(out *Tensor, high int64, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandintOut(ptr, out.ctensor, high, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
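// Randn wraps ATen's randn and returns a tensor of the given size filled
// with samples from the standard normal distribution N(0, 1). A minimal
// sketch:
//
//	z, err := Randn([]int64{2, 3}, gotch.Float, gotch.CPU)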
func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandn(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RandnLike(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandnLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RandnOut(out *Tensor, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandnOut(ptr, out.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Random_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandom_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RandomFrom_(from int64, to []int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctoVal int64 = 0
var ctoNull int = 1
if len(to) > 0 {
ctoVal = to[0]
ctoNull = 0
}
lib.AtgRandomFrom_(ptr, ts.ctensor, from, ctoVal, ctoNull)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RandomTo_(to int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandomTo_(ptr, ts.ctensor, to)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
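// Randperm wraps ATen's randperm and returns a random permutation of the
// integers 0..n-1 as a one-dimensional tensor. A minimal sketch, producing
// an int64 permutation on the CPU:
//
//	p, err := Randperm(10, gotch.Int64, gotch.CPU)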
func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RandpermOut(out *Tensor, n int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRandpermOut(ptr, out.ctensor, n)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Range(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRange(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RangeOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRangeStep(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Ravel(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRavel(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Real(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Reciprocal(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReciprocal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Reciprocal_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReciprocal_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ReciprocalOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad1d(padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad2d(padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad3d(padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad3d(ptr, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReflectionPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReflectionPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
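// Relu wraps ATen's relu and returns max(x, 0) elementwise; Relu_ below is
// the in-place form that overwrites the receiver's underlying tensor. A
// minimal sketch, assuming x is a valid tensor:
//
//	y, err := x.Relu(false)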
func (ts *Tensor) Relu(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Relu6(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu6(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Relu6_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu6_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Relu_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Remainder(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainder(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Remainder_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
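// RemainderScalarOut illustrates the `...Out` convention used throughout
// this file: the result is written into a caller-supplied `out` tensor
// (passed first to the C call), and the returned *Tensor wraps that same
// storage.
//
// Sketch (x and out are assumed existing *Tensors, s an assumed *Scalar):
//
//	res, err := x.RemainderScalarOut(out, s, false)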
func (ts *Tensor) RemainderScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RemainderScalarTensor(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderScalarTensor(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RemainderTensor(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderTensor(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RemainderTensor_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderTensor_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RemainderTensorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRemainderTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Renorm(p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Renorm_(p *Scalar, dim int64, maxnorm *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Repeat(repeats []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRepeat(ptr, ts.ctensor, repeats, len(repeats))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
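// RepeatInterleave takes its optional int64 argument as a slice: a nil or
// empty outputSize means "unset", otherwise only outputSize[0] is read.
// The generator encodes optional scalars this way because Go has no
// nullable int64; the paired *Null flag below tells the C side whether a
// value was provided.
//
// Sketch (repeats is an assumed existing *Tensor):
//
//	r, err := RepeatInterleave(repeats, nil) // outputSize unset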
func RepeatInterleave(repeats *Tensor, outputSize []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var coutputSizeVal int64 = 0
var coutputSizeNull int = 1
if len(outputSize) > 0 {
coutputSizeVal = outputSize[0]
coutputSizeNull = 0
}
lib.AtgRepeatInterleave(ptr, repeats.ctensor, coutputSizeVal, coutputSizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
var coutputSizeVal int64 = 0
var coutputSizeNull int = 1
if len(outputSize) > 0 {
coutputSizeVal = outputSize[0]
coutputSizeNull = 0
}
lib.AtgRepeatInterleaveSelfInt(ptr, ts.ctensor, repeats, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
var coutputSizeVal int64 = 0
var coutputSizeNull int = 1
if len(outputSize) > 0 {
coutputSizeVal = outputSize[0]
coutputSizeNull = 0
}
lib.AtgRepeatInterleaveSelfTensor(ptr, ts.ctensor, repeats.ctensor, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad1d(padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad2d(padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad3d(padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReplicationPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
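// RequiresGrad_ toggles gradient tracking in place. Go bools are lowered to
// int32 (0/1) before crossing into C, a pattern repeated for every boolean
// flag in this file.
//
// Sketch (x is an assumed existing *Tensor):
//
//	if err := x.RequiresGrad_(true); err != nil {
//		// handle libtorch error
//	}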
func (ts *Tensor) RequiresGrad_(requiresGrad bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
crequiresGrad := int32(0)
if requiresGrad {
crequiresGrad = int32(1)
}
lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
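// Reshape returns a tensor with the requested shape (a view when possible,
// otherwise a copy); the shape slice is passed to C together with its
// length.
//
// Sketch flattening a 2x3 tensor (x is an assumed existing *Tensor):
//
//	flat, err := x.Reshape([]int64{6}, false)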
func (ts *Tensor) Reshape(shape []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReshape(ptr, ts.ctensor, shape, len(shape))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ReshapeAs(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Resize_(size []int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgResize_(ptr, ts.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ResizeAs_(theTemplate *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ResizeAsSparse_(theTemplate *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgResizeAsSparse_(ptr, ts.ctensor, theTemplate.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ResolveConj(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgResolveConj(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ResolveNeg(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgResolveNeg(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RetainsGrad(del bool) (retVal bool, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgRetainsGrad(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
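// RnnRelu returns two tensors (output and final hidden state). For
// multi-result ops the generator allocates one slot and derives the second
// pointer by offsetting it one pointer width; the C call is expected to
// write the result ctensors into consecutive slots starting at
// ctensorPtr0. Tensor slices such as params are first flattened into
// []lib.Ctensor.
//
// Sketch (input, hx, params are assumed existing tensors):
//
//	out, hidden, err := RnnRelu(input, hx, params, true, 1, 0.0, true, false, true)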
func RnnRelu(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.AtgRnnRelu(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func RnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
lib.AtgRnnReluData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func RnnTanh(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
cbatchFirst := int32(0)
if batchFirst {
cbatchFirst = int32(1)
}
lib.AtgRnnTanh(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func RnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var cparams []lib.Ctensor
for _, t := range params {
cparams = append(cparams, t.ctensor)
}
chasBiases := int32(0)
if hasBiases {
chasBiases = int32(1)
}
ctrain := int32(0)
if train {
ctrain = int32(1)
}
cbidirectional := int32(0)
if bidirectional {
cbidirectional = int32(1)
}
lib.AtgRnnTanhData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Roll(shifts []int64, dims []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRoll(ptr, ts.ctensor, shifts, len(shifts), dims, len(dims))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rot90(k int64, dims []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRot90(ptr, ts.ctensor, k, dims, len(dims))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Round(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRound(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Round_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRound_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RoundDecimals(decimals int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRoundDecimals(ptr, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RoundDecimals_(decimals int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRoundDecimals_(ptr, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RoundDecimalsOut(out *Tensor, decimals int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRoundDecimalsOut(ptr, out.ctensor, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RoundOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RowStack(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgRowStack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func RowStackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgRowStackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rrelu(training bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgRrelu(ptr, ts.ctensor, ctraining)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rrelu_(training bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgRrelu_(ptr, ts.ctensor, ctraining)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RreluWithNoise(noise *Tensor, training bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RreluWithNoise_(noise *Tensor, training bool) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
cselfIsResult := int32(0)
if selfIsResult {
cselfIsResult = int32(1)
}
lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctraining := int32(0)
if training {
ctraining = int32(1)
}
lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rsqrt(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRsqrt(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rsqrt_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRsqrt_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) RsqrtOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Rsub(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRsub(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) RsubScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgRsubScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
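// Scatter writes values from src into a copy of the receiver at the
// positions given by index along dim (the out-of-place analogue of
// PyTorch's Tensor.scatter_).
//
// Sketch (x, index, src are assumed existing *Tensors):
//
//	y, err := x.Scatter(0, index, src, false)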
func (ts *Tensor) Scatter(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Scatter_(dim int64, index *Tensor, src *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ScatterAdd(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterAdd_(dim int64, index *Tensor, src *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterReduce(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterReduce_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterSrcOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterValue(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValue(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterValue_(dim int64, index *Tensor, value *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValue_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueReduce(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ScatterValueReduce_(dim int64, index *Tensor, value *Scalar, reduce string) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueReduce_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgScatterValueReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar, reduce)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
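// Searchsorted: note the argument order on the C call below —
// sortedSequence comes first and the receiver is the values tensor being
// searched, mirroring torch.searchsorted(sorted_sequence, input).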
func (ts *Tensor) Searchsorted(sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
cright := int32(0)
if right {
cright = int32(1)
}
lib.AtgSearchsorted(ptr, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool, side string, sorter *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
cright := int32(0)
if right {
cright = int32(1)
}
lib.AtgSearchsortedScalar(ptr, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright, side, sorter.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
coutInt32 := int32(0)
if outInt32 {
coutInt32 = int32(1)
}
cright := int32(0)
if right {
cright = int32(1)
}
lib.AtgSearchsortedTensorOut(ptr, out.ctensor, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, axis int64, unsafety bool, initial *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunsafety := int32(0)
if unsafety {
cunsafety = int32(1)
}
lib.AtgSegmentReduce(ptr, data.ctensor, reduce, lengths.ctensor, indices.ctensor, axis, cunsafety, initial.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Select(dim int64, index int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelect(ptr, ts.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelectBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SelectScatter(src *Tensor, dim int64, index int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelectScatter(ptr, ts.ctensor, src.ctensor, dim, index)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Selu(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Selu_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSelu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Set_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSet_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SetRequiresGrad(r bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cr := int32(0)
if r {
cr = int32(1)
}
lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SetSourceTensor_(source *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSetSourceTensor_(ptr, ts.ctensor, source.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Sgn(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSgn(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sgn_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSgn_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SgnOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSgnOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sigmoid(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoid(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sigmoid_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoid_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func SigmoidBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SigmoidOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sign(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSign(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sign_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSign_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SignOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSignOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Signbit(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSignbit(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SignbitOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSignbitOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Silu(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSilu(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Silu_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSilu_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SiluBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSiluBackward(ptr, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSiluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SiluOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSiluOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sin(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSin(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sin_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSin_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SinOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sinc(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sinc_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinc_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SincOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSincOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sinh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sinh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SinhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
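// Slice narrows the receiver along dim. start and end use the same
// optional-as-slice encoding described at RepeatInterleave: nil means the
// dimension boundary, otherwise only the first element is read.
//
// Sketch taking elements [1, 4) of dim 0 (x is an assumed existing *Tensor):
//
//	s, err := x.Slice(0, []int64{1}, []int64{4}, 1, false)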
func (ts *Tensor) Slice(dim int64, start []int64, end []int64, step int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstartVal int64 = 0
var cstartNull int = 1
if len(start) > 0 {
cstartVal = start[0]
cstartNull = 0
}
var cendVal int64 = 0
var cendNull int = 1
if len(end) > 0 {
cendVal = end[0]
cendNull = 0
}
lib.AtgSlice(ptr, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSliceBackward(ptr, gradOutput.ctensor, inputSizes, len(inputSizes), dim, start, end, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SliceScatter(src *Tensor, dim int64, start []int64, end []int64, step int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cstartVal int64 = 0
var cstartNull int = 1
if len(start) > 0 {
cstartVal = start[0]
cstartNull = 0
}
var cendVal int64 = 0
var cendNull int = 1
if len(end) > 0 {
cendVal = end[0]
cendNull = 0
}
lib.AtgSliceScatter(ptr, ts.ctensor, src.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
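// Slogdet returns the sign and the log of the absolute determinant as two
// tensors, using the same consecutive-slot return buffer as the RNN
// wrappers above.
//
// Sketch (x is an assumed existing square *Tensor):
//
//	sign, logdet, err := x.Slogdet(false)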
func (ts *Tensor) Slogdet(del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgSlogdet(ctensorPtr0, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) SlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConvTranspose2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Smm(mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, beta)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
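// Softmax normalizes along dim; the target dtype is lowered to its C enum
// via DType.CInt(), as with every dtype/device option in this file.
//
// Sketch over the last dimension (x is an assumed existing *Tensor):
//
//	p, err := x.Softmax(-1, gotch.Float, false)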
func (ts *Tensor) Softmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Softplus(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplus(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplusBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftplusOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Softshrink(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrink(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SoftshrinkOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Solve(a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgSolve(ctensorPtr0, ts.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) SolveSolution(solution *Tensor, lu *Tensor, a *Tensor, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
lib.AtgSolveSolution(ctensorPtr0, solution.ctensor, lu.ctensor, ts.ctensor, a.ctensor)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Sort(dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cdescending := int32(0)
if descending {
cdescending = int32(1)
}
lib.AtgSort(ctensorPtr0, ts.ctensor, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) SortStable(stable bool, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cstable := int32(0)
if stable {
cstable = int32(1)
}
cdescending := int32(0)
if descending {
cdescending = int32(1)
}
lib.AtgSortStable(ctensorPtr0, ts.ctensor, cstable, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) SortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cdescending := int32(0)
if descending {
cdescending = int32(1)
}
lib.AtgSortValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) SortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cstable := int32(0)
if stable {
cstable = int32(1)
}
cdescending := int32(0)
if descending {
cdescending = int32(1)
}
lib.AtgSortValuesStable(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, cstable, dim, cdescending)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
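// SparseCooTensor and its variants construct sparse COO tensors.
// SparseCooTensor allocates an empty sparse tensor of the given size;
// SparseCooTensorIndices builds one from an `indices` tensor (one row per
// sparse dimension, one column per non-zero) plus matching `values`, and
// SparseCooTensorIndicesSize additionally fixes the overall size. A
// sketch, assuming `indices` and `values` are existing *Tensor values and
// the usual gotch.Float / gotch.CPU constants:
//
//	sp, err := SparseCooTensorIndicesSize(indices, values, []int64{3, 4}, gotch.Float, gotch.CPU)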
func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCooTensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCooTensorIndices(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCooTensorIndicesSize(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCsrTensor(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseCsrTensorCrowColValueSize(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
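// SparseDim reports the number of sparse dimensions of a sparse tensor.
// SparseMask, below, should behave like torch's sparse_mask: it returns a
// new sparse tensor taking values from the (strided) receiver at the index
// positions of the sparse `mask`.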
func (ts *Tensor) SparseDim(del bool) (retVal int64, err error) {
if del {
defer ts.MustDrop()
}
retVal = lib.AtgSparseDim(ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
return retVal, err
}
func (ts *Tensor) SparseMask(mask *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseResize_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SparseSampledAddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseSampledAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SparseSampledAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSparseSampledAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
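// The Special* methods below wrap torch's `special` namespace (digamma,
// entr, erf, erfc, erfcx, erfinv, exp2, expit, expm1, gamma functions, and
// so on). Each comes as a pair: the plain form allocates a fresh result
// tensor, while the *Out form writes into the caller-supplied `out` tensor
// and returns it.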
func (ts *Tensor) SpecialDigamma(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialDigamma(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialDigammaOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialDigammaOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialEntr(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialEntr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialEntrOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialEntrOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErf(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErf(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErfOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErfc(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErfcOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfcOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErfcx(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfcx(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErfcxOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfcxOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErfinv(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfinv(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialErfinvOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialErfinvOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialExp2(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExp2(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialExp2Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExp2Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialExpit(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpit(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialExpitOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpitOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialExpm1(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpm1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialExpm1Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialExpm1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialGammainc(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammainc(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialGammaincOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammaincOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialGammaincc(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammaincc(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialGammainccOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammainccOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialGammaln(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammaln(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialGammalnOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialGammalnOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI0(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI0Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI0e(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0e(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI0eOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI0eOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI1(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI1Out(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1Out(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI1e(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1e(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialI1eOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialI1eOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialLog1p(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLog1p(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialLog1pOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLog1pOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialLogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
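// SpecialLogit shows this file's convention for nullable scalar arguments:
// `eps` is passed as a []float64, where an empty (or nil) slice sets the C
// null flag (torch's eps=None) and a one-element slice supplies the value.
// A sketch, assuming `x` is an existing *Tensor:
//
//	y, err := x.SpecialLogit([]float64{1e-6}, false) // clamp with eps
//	z, err := x.SpecialLogit(nil, false)             // eps = None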
func (ts *Tensor) SpecialLogit(eps []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgSpecialLogit(ptr, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialLogitOut(out *Tensor, eps []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cepsVal float64 = 0.0
var cepsNull int = 1
if len(eps) > 0 {
cepsVal = eps[0]
cepsNull = 0
}
lib.AtgSpecialLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialLogsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgSpecialLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgSpecialLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialMultigammaln(p int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialMultigammaln(ptr, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialMultigammalnOut(out *Tensor, p int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialMultigammalnOut(ptr, out.ctensor, ts.ctensor, p)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialNdtr(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtr(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialNdtrOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtrOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialNdtri(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtri(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialNdtriOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialNdtriOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialPolygamma(n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPolygamma(ptr, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialPolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPolygammaOut(ptr, out.ctensor, n, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialPsi(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPsi(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialPsiOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialPsiOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialRound(decimals int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialRound(ptr, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialRoundOut(out *Tensor, decimals int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialRoundOut(ptr, out.ctensor, ts.ctensor, decimals)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialSinc(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSinc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialSincOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSincOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialSoftmax(ptr, ts.ctensor, dim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlog1py(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1py(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlog1pyOtherScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pyOtherScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlog1pyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pySelfScalar(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlog1pySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlogy(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogy(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlogyOtherScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogyOtherScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialXlogyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogySelfScalar(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialXlogySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialZeta(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZeta(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialZetaOtherScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaOtherScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SpecialZetaOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaSelfScalar(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func SpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSpecialZetaSelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
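// Sqrt, Sqrt_ and SqrtOut illustrate the three method shapes used
// throughout this file: the plain form returns a newly allocated tensor,
// the trailing-underscore form mutates the receiver in place (replacing
// ts.ctensor), and the *Out form writes into the provided `out` tensor.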
func (ts *Tensor) Sqrt(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqrt(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sqrt_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqrt_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SqrtOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Square(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSquare(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Square_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSquare_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SquareOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSquareOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Squeeze(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueeze(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Squeeze_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueeze_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SqueezeDim(dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueezeDim(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SqueezeDim_(dim int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSqueezeDim_(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Sspaddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
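// Stack concatenates a slice of tensors along a new dimension `dim`; as
// with torch.stack, all inputs must have the same shape. A sketch,
// assuming `a` and `b` are existing Tensor values of equal shape:
//
//	stacked, err := Stack([]Tensor{a, b}, 0)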
func Stack(tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgStack(ptr, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func StackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
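// The Std* methods compute standard deviations. Std reduces over all
// elements using the classic `unbiased` flag; StdDim reduces over `dim`
// with optional `keepdim`; StdCorrection instead takes torch's newer
// `correction` term as a nullable []int64 (empty slice = default). The
// StdMean* variants return the pair (std, mean).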
func (ts *Tensor) Std(unbiased bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
lib.AtgStd(ptr, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) StdCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgStdCorrection(ptr, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) StdCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgStdCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) StdDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgStdDim(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) StdMean(unbiased bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
lib.AtgStdMean(ctensorPtr0, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) StdMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgStdMeanCorrection(ctensorPtr0, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) StdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgStdMeanDim(ctensorPtr0, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) StdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
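// Stft wraps the short-time Fourier transform. `hopLength` and
// `winLength` are nullable []int64 arguments (empty slice = torch's
// defaults), `window` is the window tensor, and the boolean flags map onto
// torch.stft's normalized / onesided / return_complex parameters.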
func (ts *Tensor) Stft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var chopLengthVal int64 = 0
var chopLengthNull int = 1
if len(hopLength) > 0 {
chopLengthVal = hopLength[0]
chopLengthNull = 0
}
var cwinLengthVal int64 = 0
var cwinLengthNull int = 1
if len(winLength) > 0 {
cwinLengthVal = winLength[0]
cwinLengthNull = 0
}
cnormalized := int32(0)
if normalized {
cnormalized = int32(1)
}
conesided := int32(0)
if onesided {
conesided = int32(1)
}
creturnComplex := int32(0)
if returnComplex {
creturnComplex = int32(1)
}
lib.AtgStft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, cnormalized, conesided, creturnComplex)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sub(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSub(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Sub_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSub_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SubOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SubScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SubScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Subtract(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubtract(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Subtract_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubtract_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) SubtractOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubtractOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SubtractScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubtractScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SubtractScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSubtractScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
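// Sum reduces over all elements, casting to `dtype` before accumulating;
// SumDimIntlist reduces over the listed dimensions with optional
// `keepdim`, and SumToSize sums the receiver down to the given broadcast
// shape.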
func (ts *Tensor) Sum(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSum(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgSumDimIntlist(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgSumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) SumToSize(size []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSumToSize(ptr, ts.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
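// Svd wraps torch.svd and returns the triple (U, S, V) of the singular
// value decomposition; `some` selects the reduced decomposition, and when
// `computeUv` is false only the singular values S are meaningful. A
// sketch, assuming `m` is an existing 2-D *Tensor:
//
//	u, s, v, err := m.Svd(true, true, false)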
func (ts *Tensor) Svd(some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some {
csome = int32(1)
}
ccomputeUv := int32(0)
if computeUv {
ccomputeUv = int32(1)
}
lib.AtgSvd(ctensorPtr0, ts.ctensor, csome, ccomputeUv)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) SvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csome := int32(0)
if some {
csome = int32(1)
}
ccomputeUv := int32(0)
if computeUv {
ccomputeUv = int32(1)
}
lib.AtgSvdU(ctensorPtr0, u.ctensor, s.ctensor, v.ctensor, ts.ctensor, csome, ccomputeUv)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) Swapaxes(axis0 int64, axis1 int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSwapaxes(ptr, ts.ctensor, axis0, axis1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Swapaxes_(axis0 int64, axis1 int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSwapaxes_(ptr, ts.ctensor, axis0, axis1)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Swapdims(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSwapdims(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Swapdims_(dim0 int64, dim1 int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgSwapdims_(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
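// Symeig wraps torch.symeig for symmetric input and returns the pair
// (eigenvalues, eigenvectors); the eigenvector tensor is only populated
// when `eigenvectors` is true, and `upper` chooses which triangle of the
// matrix is read.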
func (ts *Tensor) Symeig(eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ceigenvectors := int32(0)
if eigenvectors {
ceigenvectors = int32(1)
}
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgSymeig(ctensorPtr0, ts.ctensor, ceigenvectors, cupper)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) SymeigE(e *Tensor, v *Tensor, eigenvectors bool, upper bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ceigenvectors := int32(0)
if eigenvectors {
ceigenvectors = int32(1)
}
cupper := int32(0)
if upper {
cupper = int32(1)
}
lib.AtgSymeigE(ctensorPtr0, e.ctensor, v.ctensor, ts.ctensor, ceigenvectors, cupper)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) T(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgT(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) T_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgT_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Take(index *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTake(ptr, ts.ctensor, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TakeAlongDim(indices *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgTakeAlongDim(ptr, ts.ctensor, indices.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgTakeAlongDimOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TakeOut(out *Tensor, index *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Tan(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTan(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Tan_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTan_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) TanOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTanOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Tanh(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTanh(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Tanh_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTanh_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func TanhBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func TanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TanhOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Tensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTensordotOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
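// Threshold wraps torch.threshold: elements of the receiver greater than
// `threshold` pass through unchanged and all others are replaced by
// `value`. Threshold_ is the in-place form and ThresholdOut writes into
// `out`.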
func (ts *Tensor) Threshold(threshold *Scalar, value *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Threshold_(threshold *Scalar, value *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) ThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgThresholdBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, threshold.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Tile(dims []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTile(ptr, ts.ctensor, dims, len(dims))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
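// The To* methods move or convert tensors: To changes only the device,
// ToDtype only the element type, and ToDevice both at once. `nonBlocking`
// requests an asynchronous copy where the backend supports it, and `copy`
// forces a fresh tensor even when no conversion is needed. A sketch,
// assuming `x` is an existing *Tensor and the usual gotch constants:
//
//	y, err := x.ToDevice(gotch.CPU, gotch.Float, false, false, false)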
func (ts *Tensor) To(device gotch.Device, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTo(ptr, ts.ctensor, device.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToDense(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToDense(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ToDenseBackward(grad *Tensor, input *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
ccopy := int32(0)
if copy {
ccopy = int32(1)
}
lib.AtgToDevice(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
ccopy := int32(0)
if copy {
ccopy = int32(1)
}
lib.AtgToDtype(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
ccopy := int32(0)
if copy {
ccopy = int32(1)
}
lib.AtgToDtypeLayout(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToMkldnn(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToMkldnn(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ToMkldnnBackward(grad *Tensor, input *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToOther(other *Tensor, nonBlocking bool, copy bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cnonBlocking := int32(0)
if nonBlocking {
cnonBlocking = int32(1)
}
ccopy := int32(0)
if copy {
ccopy = int32(1)
}
lib.AtgToOther(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToSparse(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToSparse(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ToSparseSparseDim(sparseDim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgToSparseSparseDim(ptr, ts.ctensor, sparseDim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
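// Topk returns the k largest (or smallest, when largest=false) elements along
// dim together with their indices. Functions returning several tensors share
// one convention in this file: the C call writes the returned handles
// consecutively starting at the first pointer, and the remaining pointers are
// derived by pointer arithmetic rather than separate allocations. A minimal
// sketch, assuming x is 1-D with at least five elements:
//
//	vals, idxs, err := x.Topk(5, 0, true, true, false)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer vals.MustDrop()
//	defer idxs.MustDrop()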
func (ts *Tensor) Topk(k int64, dim int64, largest bool, sorted bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
clargest := int32(0)
if largest {
clargest = int32(1)
}
csorted := int32(0)
if sorted {
csorted = int32(1)
}
lib.AtgTopk(ctensorPtr0, ts.ctensor, k, dim, clargest, csorted)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) TopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
clargest := int32(0)
if largest {
clargest = int32(1)
}
csorted := int32(0)
if sorted {
csorted = int32(1)
}
lib.AtgTopkValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, clargest, csorted)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) Totype(scalarType gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Trace(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrace(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func TraceBackward(grad *Tensor, sizes []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTraceBackward(ptr, grad.ctensor, sizes, len(sizes))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
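// Transpose swaps dimensions dim0 and dim1; Transpose_ below is the in-place
// variant. A minimal sketch transposing a 2-D tensor:
//
//	xt, err := x.Transpose(0, 1, false)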
func (ts *Tensor) Transpose(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Transpose_(dim0 int64, dim1 int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func Trapezoid(y *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapezoid(ptr, y.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func TrapezoidX(y *Tensor, x *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapezoidX(ptr, y.ctensor, x.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Trapz(y *Tensor, x *Tensor, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func TrapzDx(y *Tensor, dx float64, dim int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrapzDx(ptr, y.ctensor, dx, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
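// TriangularSolve solves a*x = b, where b is the receiver and a is a
// triangular coefficient matrix; it returns the solution and a copy of a. A
// minimal sketch, assuming a is upper triangular:
//
//	sol, aClone, err := b.TriangularSolve(a, true, false, false, false)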
func (ts *Tensor) TriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
ctranspose := int32(0)
if transpose {
ctranspose = int32(1)
}
cunitriangular := int32(0)
if unitriangular {
cunitriangular = int32(1)
}
lib.AtgTriangularSolve(ctensorPtr0, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) TriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cupper := int32(0)
if upper {
cupper = int32(1)
}
ctranspose := int32(0)
if transpose {
ctranspose = int32(1)
}
cunitriangular := int32(0)
if unitriangular {
cunitriangular = int32(1)
}
lib.AtgTriangularSolveX(ctensorPtr0, x.ctensor, m.ctensor, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
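// Tril keeps the lower triangle of the matrix and zeroes everything above the
// given diagonal; Triu below is the mirror image. diagonal=0 keeps the main
// diagonal, positive values retain super-diagonals. A minimal sketch:
//
//	lower, err := x.Tril(0, false)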
func (ts *Tensor) Tril(diagonal int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTril(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Tril_(diagonal int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTril_(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TrilOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
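// TripletMarginLoss computes max(d(a, p) - d(a, n) + margin, 0) with the
// p-norm distance d; reduction follows the usual PyTorch encoding (0: none,
// 1: mean, 2: sum). A minimal sketch with the common defaults margin=1.0,
// p=2, eps=1e-6, swap=false and mean reduction:
//
//	loss, err := TripletMarginLoss(anchor, positive, negative, 1.0, 2.0, 1e-6, false, 1)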
func TripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cswap := int32(0)
if swap {
cswap = int32(1)
}
lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Triu(diagonal int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriu(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Triu_(diagonal int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriu_(ptr, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TriuOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TrueDivide(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TrueDivide_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) TrueDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TrueDivideScalar(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivideScalar(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TrueDivideScalar_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrueDivideScalar_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) Trunc(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrunc(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Trunc_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTrunc_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) TruncOut(out *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) TypeAs(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Unflatten(dim int64, sizes []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnflatten(ptr, ts.ctensor, dim, sizes, len(sizes))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnfoldBackward(ptr, gradIn.ctensor, inputSizes, len(inputSizes), dim, size, step)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Uniform_(from float64, to float64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUniform_(ptr, ts.ctensor, from, to)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
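// UniqueConsecutive eliminates consecutive duplicate values. Its dim argument
// shows how these bindings encode optional scalars: a nil or empty slice
// means "not set" (the *Null flag is passed as 1), while a one-element slice
// carries the value. A minimal sketch operating on the flattened tensor:
//
//	uniq, inverse, counts, err := x.UniqueConsecutive(true, true, nil, false)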
func (ts *Tensor) UniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
creturnInverse := int32(0)
if returnInverse {
creturnInverse = int32(1)
}
creturnCounts := int32(0)
if returnCounts {
creturnCounts = int32(1)
}
var cdimVal int64 = 0
var cdimNull int = 1
if len(dim) > 0 {
cdimVal = dim[0]
cdimNull = 0
}
lib.AtgUniqueConsecutive(ctensorPtr0, ts.ctensor, creturnInverse, creturnCounts, cdimVal, cdimNull)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) UniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
csorted := int32(0)
if sorted {
csorted = int32(1)
}
creturnInverse := int32(0)
if returnInverse {
creturnInverse = int32(1)
}
creturnCounts := int32(0)
if returnCounts {
creturnCounts = int32(1)
}
lib.AtgUniqueDim(ctensorPtr0, ts.ctensor, dim, csorted, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) UniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr0)))
creturnInverse := int32(0)
if returnInverse {
creturnInverse = int32(1)
}
creturnCounts := int32(0)
if returnCounts {
creturnCounts = int32(1)
}
lib.AtgUniqueDimConsecutive(ctensorPtr0, ts.ctensor, dim, creturnInverse, creturnCounts)
if err = TorchErr(); err != nil {
return retVal0, retVal1, retVal2, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
retVal2 = newTensor(*ctensorPtr2)
return retVal0, retVal1, retVal2, err
}
func (ts *Tensor) Unsqueeze(dim int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnsqueeze(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Unsqueeze_(dim int64) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgUnsqueeze_(ptr, ts.ctensor, dim)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
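// UpsampleBicubic2d resizes an NCHW tensor to outputSize using bicubic
// interpolation. The whole Upsample* family below uses the optional-scalar
// encoding described above: scalesH/scalesW are one-element slices when
// explicit scale factors are supplied, and empty when the scales should be
// inferred from outputSize. A minimal sketch upscaling a 1x3x32x32 input:
//
//	up, err := x.UpsampleBicubic2d([]int64{64, 64}, false, nil, nil, false)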
func (ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleNearest1d(outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesVal float64 = 0.0
var cscalesNull int = 1
if len(scales) > 0 {
cscalesVal = scales[0]
cscalesNull = 0
}
lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func UpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
calignCorners := int32(0)
if alignCorners {
calignCorners = int32(1)
}
var cscalesDVal float64 = 0.0
var cscalesDNull int = 1
if len(scalesD) > 0 {
cscalesDVal = scalesD[0]
cscalesDNull = 0
}
var cscalesHVal float64 = 0.0
var cscalesHNull int = 1
if len(scalesH) > 0 {
cscalesHVal = scalesH[0]
cscalesHNull = 0
}
var cscalesWVal float64 = 0.0
var cscalesWNull int = 1
if len(scalesW) > 0 {
cscalesWVal = scalesW[0]
cscalesWNull = 0
}
lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgValueSelectingReductionBackward(ptr, grad.ctensor, dim, indices.ctensor, sizes, len(sizes), ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Values(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgValues(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func Vander(x *Tensor, n []int64, increasing bool) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var cnVal int64 = 0
var cnNull int = 1
if len(n) > 0 {
cnVal = n[0]
cnNull = 0
}
cincreasing := int32(0)
if increasing {
cincreasing = int32(1)
}
lib.AtgVander(ptr, x.ctensor, cnVal, cnNull, cincreasing)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
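// Var returns the variance over all elements; unbiased selects Bessel's
// correction (dividing by n-1 rather than n). A minimal sketch:
//
//	v, err := x.Var(true, false)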
func (ts *Tensor) Var(unbiased bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
lib.AtgVar(ptr, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) VarCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgVarCorrection(ptr, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) VarCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgVarCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) VarDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgVarDim(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) VarMean(unbiased bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
lib.AtgVarMean(ctensorPtr0, ts.ctensor, cunbiased)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) VarMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
var ccorrectionVal int64 = 0
var ccorrectionNull int = 1
if len(correction) > 0 {
ccorrectionVal = correction[0]
ccorrectionNull = 0
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgVarMeanCorrection(ctensorPtr0, ts.ctensor, dim, len(dim), ccorrectionVal, ccorrectionNull, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) VarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool) (retVal0 *Tensor, retVal1 *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ctensorPtr0 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr0)) + unsafe.Sizeof(ctensorPtr0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgVarMeanDim(ctensorPtr0, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal0, retVal1, err
}
retVal0 = newTensor(*ctensorPtr0)
retVal1 = newTensor(*ctensorPtr1)
return retVal0, retVal1, err
}
func (ts *Tensor) VarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
cunbiased := int32(0)
if unbiased {
cunbiased = int32(1)
}
ckeepdim := int32(0)
if keepdim {
ckeepdim = int32(1)
}
lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Vdot(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgVdot(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) VdotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgVdotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
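// View reshapes the tensor without copying data; size must be compatible with
// the element count, and one dimension may be -1 to have it inferred. A
// minimal sketch flattening a 2x3 tensor:
//
//	flat, err := x.View([]int64{-1}, false)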
func (ts *Tensor) View(size []int64, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgView(ptr, ts.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ViewAs(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAs(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ViewAsComplex(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsComplex(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ViewAsReal(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewAsReal(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ViewDtype(dtype gotch.DType, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgViewDtype(ptr, ts.ctensor, dtype.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
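// Vstack stacks tensors vertically (along dimension 0), mirroring
// torch.vstack. Note that it takes []Tensor by value, the generator's
// convention for tensor lists. A minimal sketch, assuming a and b are
// *Tensor values of matching width:
//
//	stacked, err := Vstack([]Tensor{*a, *b})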
func Vstack(tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgVstack(ptr, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func VstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
var ctensors []lib.Ctensor
for _, t := range tensors {
ctensors = append(ctensors, t.ctensor)
}
lib.AtgVstackOut(ptr, out.ctensor, ctensors, len(ctensors))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
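// The Where* variants below cover the operand combinations of torch.where:
// WhereScalar takes two scalars, WhereScalarother a tensor self with a scalar
// other, WhereScalarself the reverse, and WhereSelf two tensors. A minimal
// sketch of the tensor/tensor form, assuming cond is a boolean tensor
// broadcastable with x and y:
//
//	res, err := x.WhereSelf(cond, y, false)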
func WhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereScalar(ptr, condition.ctensor, selfScalar.cscalar, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) WhereScalarother(condition *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereScalarother(ptr, condition.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func WhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereScalarself(ptr, condition.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) WhereSelf(condition *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgWhereSelf(ptr, condition.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Xlogy(other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogy(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Xlogy_(other *Tensor) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogy_(ptr, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func (ts *Tensor) XlogyOutscalarOther(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyOutscalarOther(ptr, out.ctensor, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func XlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyOutscalarSelf(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) XlogyOuttensor(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyOuttensor(ptr, out.ctensor, ts.ctensor, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) XlogyScalarOther(other *Scalar, del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyScalarOther(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) XlogyScalarOther_(other *Scalar) (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyScalarOther_(ptr, ts.ctensor, other.cscalar)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
func XlogyScalarSelf(selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgXlogyScalarSelf(ptr, selfScalar.cscalar, other.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) Zero_() (err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZero_(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return err
}
ts.ctensor = *ptr
return err
}
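// Zeros creates a tensor of the given size filled with zeros. A minimal
// sketch, assuming float32 elements on the CPU:
//
//	z, err := Zeros([]int64{2, 3}, gotch.Float, gotch.CPU)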
func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZeros(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt())
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func (ts *Tensor) ZerosLike(del bool) (retVal *Tensor, err error) {
if del {
defer ts.MustDrop()
}
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZerosLike(ptr, ts.ctensor)
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
func ZerosOut(out *Tensor, size []int64) (retVal *Tensor, err error) {
ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))
lib.AtgZerosOut(ptr, out.ctensor, size, len(size))
if err = TorchErr(); err != nil {
return retVal, err
}
retVal = newTensor(*ptr)
return retVal, err
}
// End of Tensor implementation =================================