gotch/ts/must-tensor-generated.go
package ts
// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
import (
	"log"

	"git.andr3h3nriqu3s.com/andr3/gotch"
)
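
// The Must* functions below are thin convenience wrappers around the
// error-returning methods generated alongside this file: each one calls the
// underlying method, terminates the program via log.Fatal if an error is
// returned, and hands back only the result values. Wrappers that accept a
// `del` flag appear to release the receiver tensor after the call when
// `del` is true (an assumption based on the generated signatures; consult
// the non-Must API for the authoritative behaviour).
//
// A minimal usage sketch, assuming `a` and `b` are valid *Tensor values of
// compatible shape (hypothetical example, not part of the generated code):
//
//	sum := a.Must_AddRelu(b, false) // keep `a` alive after the call
//	a.Must_AddRelu_(b)              // in-place variant; exits on error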
func(ts *Tensor) Must__And_(other *Scalar)() {
err := ts.__And_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__AndTensor_(other *Tensor)() {
err := ts.__AndTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Iand_(other *Scalar)() {
err := ts.__Iand_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IandTensor_(other *Tensor)() {
err := ts.__IandTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Ilshift_(other *Scalar)() {
err := ts.__Ilshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IlshiftTensor_(other *Tensor)() {
err := ts.__IlshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Ior_(other *Scalar)() {
err := ts.__Ior_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IorTensor_(other *Tensor)() {
err := ts.__IorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Irshift_(other *Scalar)() {
err := ts.__Irshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IrshiftTensor_(other *Tensor)() {
err := ts.__IrshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Ixor_(other *Scalar)() {
err := ts.__Ixor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IxorTensor_(other *Tensor)() {
err := ts.__IxorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Lshift_(other *Scalar)() {
err := ts.__Lshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__LshiftScalarOut_(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.__LshiftScalarOut_(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must__LshiftTensor_(other *Tensor)() {
err := ts.__LshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__LshiftTensorOut_(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.__LshiftTensorOut_(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must__Or_(other *Scalar)() {
err := ts.__Or_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__OrTensor_(other *Tensor)() {
err := ts.__OrTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Rshift_(other *Scalar)() {
err := ts.__Rshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__RshiftScalarOut_(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.__RshiftScalarOut_(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must__RshiftTensor_(other *Tensor)() {
err := ts.__RshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__RshiftTensorOut_(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.__RshiftTensorOut_(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must__Xor_(other *Scalar)() {
err := ts.__Xor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__XorTensor_(other *Tensor)() {
err := ts.__XorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
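// Wrapper names with a single underscore after "Must" (for example
// Must_AdaptiveAvgPool2d below) appear to correspond to libtorch's internal
// "_"-prefixed ATen operators; they follow the same log.Fatal and `del`
// conventions as the wrappers above.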
func(ts *Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool2dBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool2dBackwardOut(out, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool2dOut(out, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool3d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool3dBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool3dBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool3dBackwardOut(out, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool3dOut(out, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddBatchDim(batchDim int64, level int64, del bool)(retVal *Tensor) {
retVal, err := ts._AddBatchDim(batchDim, level, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddRelu(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AddRelu(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddRelu_(other *Tensor)() {
err := ts._AddRelu_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_AddReluOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AddReluOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddReluScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts._AddReluScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddReluScalar_(other *Scalar)() {
err := ts._AddReluScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_AddReluScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts._AddReluScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddmmActivation(mat1 *Tensor, mat2 *Tensor, useGelu bool, del bool)(retVal *Tensor) {
retVal, err := ts._AddmmActivation(mat1, mat2, useGelu, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddmmActivationOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, useGelu bool, del bool)(retVal *Tensor) {
retVal, err := ts._AddmmActivationOut(out, mat1, mat2, useGelu, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Aminmax(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._Aminmax(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_AminmaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._AminmaxDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_AminmaxDimOut(out0 *Tensor, out1 *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._AminmaxDimOut(out0, out1, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_AminmaxOut(out0 *Tensor, out1 *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._AminmaxOut(out0, out1, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
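// Operators with several results return them positionally, in the order the
// underlying ATen op defines. A short sketch, assuming `x` is a valid
// *Tensor and that _aminmax yields (min, max):
//
//	minT, maxT := x.Must_Aminmax(false)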
func(ts *Tensor) Must_AmpUpdateScale(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._AmpUpdateScale(growthTracker, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)() {
err := ts._AmpUpdateScale_(growthTracker, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_AmpUpdateScaleOut(out *Tensor, growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64, del bool)(retVal *Tensor) {
retVal, err := ts._AmpUpdateScaleOut(out, growthTracker, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AutocastToFullPrecision(cudaEnabled bool, cpuEnabled bool, del bool)(retVal *Tensor) {
retVal, err := ts._AutocastToFullPrecision(cudaEnabled, cpuEnabled, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AutocastToReducedPrecision(cudaEnabled bool, cpuEnabled bool, cudaDtype gotch.DType, cpuDtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._AutocastToReducedPrecision(cudaEnabled, cpuEnabled, cudaDtype, cpuDtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastByte(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastByte(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastChar(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastChar(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastDouble(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastDouble(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastFloat(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastFloat(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastHalf(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastHalf(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastInt(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastInt(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastLong(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastLong(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastShort(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastShort(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
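// Wrappers without a *Tensor receiver (such as Must_CdistBackward below)
// wrap package-level operators that take every tensor as an explicit
// argument; they carry no `del` flag, so callers manage the lifetime of
// each argument themselves.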
func Must_CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor) {
retVal, err := _CdistBackward(grad, x1, x2, p, cdist)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CdistBackwardOut(out *Tensor, grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor) {
retVal, err := _CdistBackwardOut(out, grad, x1, x2, p, cdist)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CholeskySolveHelper(a *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts._CholeskySolveHelper(a, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CholeskySolveHelperOut(out *Tensor, a *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts._CholeskySolveHelperOut(out, a, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Coalesce(del bool)(retVal *Tensor) {
retVal, err := ts._Coalesce(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CoalesceOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._CoalesceOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Coalesced(coalesced bool, del bool)(retVal *Tensor) {
retVal, err := ts._Coalesced(coalesced, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Coalesced_(coalesced bool)() {
err := ts._Coalesced_(coalesced)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_CoalescedOut(out *Tensor, coalesced bool, del bool)(retVal *Tensor) {
retVal, err := ts._CoalescedOut(out, coalesced, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ComputeLinearCombination(input *Tensor, coefficients *Tensor)(retVal *Tensor) {
retVal, err := _ComputeLinearCombination(input, coefficients)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor)(retVal *Tensor) {
retVal, err := _ComputeLinearCombinationOut(out, input, coefficients)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Conj(del bool)(retVal *Tensor) {
retVal, err := ts._Conj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConjCopy(del bool)(retVal *Tensor) {
retVal, err := ts._ConjCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConjCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._ConjCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConjPhysical(del bool)(retVal *Tensor) {
retVal, err := ts._ConjPhysical(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._ConjPhysicalOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ConvDepthwise2d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ConvDepthwise2dOut(out, weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool)(retVal *Tensor) {
retVal, err := ts._ConvertIndicesFromCooToCsr(size, outInt32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool)(retVal *Tensor) {
retVal, err := ts._ConvertIndicesFromCooToCsrOut(out, size, outInt32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvertIndicesFromCsrToCoo(crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool)(retVal *Tensor) {
retVal, err := _ConvertIndicesFromCsrToCoo(crowIndices, colIndices, outInt32, transpose)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvertIndicesFromCsrToCooOut(out *Tensor, crowIndices *Tensor, colIndices *Tensor, outInt32 bool, transpose bool)(retVal *Tensor) {
retVal, err := _ConvertIndicesFromCsrToCooOut(out, crowIndices, colIndices, outInt32, transpose)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool)(retVal *Tensor) {
retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled, allowTf32)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := _ConvolutionDeprecated(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := _ConvolutionMode(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvolutionOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool)(retVal *Tensor) {
retVal, err := _ConvolutionOut(out, input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled, allowTf32)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CopyFrom(dst *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CopyFrom(dst, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CopyFromAndResize(dst *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._CopyFromAndResize(dst, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CopyFromAndResizeOut(out *Tensor, dst *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._CopyFromAndResizeOut(out, dst, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CopyFromOut(out *Tensor, dst *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CopyFromOut(out, dst, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CsltCompress(input *Tensor)(retVal *Tensor) {
retVal, err := _CsltCompress(input)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CsltSparseMm(compressedA *Tensor, denseB *Tensor, bias *Tensor, transposeResult bool)(retVal *Tensor) {
retVal, err := _CsltSparseMm(compressedA, denseB, bias, transposeResult)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CtcLossBackwardOut(out *Tensor, grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := _CtcLossBackwardOut(out, grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CtcLossBackwardTensor(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := _CtcLossBackwardTensor(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CtcLossOut(out0 *Tensor, out1 *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CtcLossOut(out0, out1, logProbs, targets, inputLengths, targetLengths, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CtcLossTensor(logProbs, targets, inputLengths, targetLengths, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CtcLossTensorOut(out0 *Tensor, out1 *Tensor, logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CtcLossTensorOut(out0, out1, logProbs, targets, inputLengths, targetLengths, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank, deterministic, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CudnnCtcLossOut(out0 *Tensor, out1 *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CudnnCtcLossOut(out0, out1, logProbs, targets, inputLengths, targetLengths, blank, deterministic, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CudnnCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, deterministic bool, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CudnnCtcLossTensor(logProbs, targets, inputLengths, targetLengths, blank, deterministic, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CudnnInitDropoutStateOut(out *Tensor, dropout float64, train bool, dropoutSeed int64)(retVal *Tensor) {
retVal, err := _CudnnInitDropoutStateOut(out, dropout, train, dropoutSeed)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CudnnRnn(input *Tensor, weight []*Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, err := _CudnnRnn(input, weight, weightStride0, weightBuf, hx, cx, mode, hiddenSize, projSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4
}
func Must_CudnnRnnFlattenWeight(weightArr []*Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal *Tensor) {
retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, batchFirst, bidirectional)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CudnnRnnFlattenWeightOut(out *Tensor, weightArr []*Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal *Tensor) {
retVal, err := _CudnnRnnFlattenWeightOut(out, weightArr, weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, batchFirst, bidirectional)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CudnnRnnOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, input *Tensor, weight []*Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, err := _CudnnRnnOut(out0, out1, out2, out3, out4, input, weight, weightStride0, weightBuf, hx, cx, mode, hiddenSize, projSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4
}
func(ts *Tensor) Must_DebugHasInternalOverlap(del bool)(retVal int64) {
retVal, err := ts._DebugHasInternalOverlap(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_DimArange(like *Tensor, dim int64)(retVal *Tensor) {
retVal, err := _DimArange(like, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Dimi(del bool)(retVal int64) {
retVal, err := ts._Dimi(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Dimv(del bool)(retVal int64) {
retVal, err := ts._Dimv(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor)(retVal *Tensor) {
retVal, err := _DirichletGrad(x, alpha, total)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_DirichletGradOut(out *Tensor, x *Tensor, alpha *Tensor, total *Tensor)(retVal *Tensor) {
retVal, err := _DirichletGradOut(out, x, alpha, total)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EfficientAttentionBackward(gradOut_ *Tensor, query *Tensor, key *Tensor, value *Tensor, bias *Tensor, out *Tensor, cuSeqlensQ *Tensor, cuSeqlensK *Tensor, maxSeqlenK int64, maxSeqlenQ int64, logsumexp *Tensor, dropoutP float64, philoxSeed *Tensor, philoxOffset *Tensor, customMaskType int64, biasRequiresGrad bool, scale []float64, numSplitsKey []int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _EfficientAttentionBackward(gradOut_, query, key, value, bias, out, cuSeqlensQ, cuSeqlensK, maxSeqlenK, maxSeqlenQ, logsumexp, dropoutP, philoxSeed, philoxOffset, customMaskType, biasRequiresGrad, scale, numSplitsKey)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_Efficientzerotensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _Efficientzerotensor(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EfficientzerotensorOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := _EfficientzerotensorOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagDenseBackward(grad, indices, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagDenseBackwardOut(out *Tensor, grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagDenseBackwardOut(out, grad, indices, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBagForwardOnly(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_EmbeddingBagForwardOnlyOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBagForwardOnlyOut(out0, out1, out2, out3, weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_EmbeddingBagOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBagOut(out0, out1, out2, out3, weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagPerSampleWeightsBackwardOut(out *Tensor, grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagPerSampleWeightsBackwardOut(out, grad, weight, indices, offsets, offset2bag, mode, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal *Tensor) {
retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmptyAffineQuantizedOut(out *Tensor, size []int64, scale float64, zeroPoint int64)(retVal *Tensor) {
retVal, err := _EmptyAffineQuantizedOut(out, size, scale, zeroPoint)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmptyPerChannelAffineQuantizedOut(out *Tensor, size []int64, scales *Tensor, zeroPoints *Tensor, axis int64)(retVal *Tensor) {
retVal, err := _EmptyPerChannelAffineQuantizedOut(out, size, scales, zeroPoints, axis)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EuclideanDist(x1 *Tensor, x2 *Tensor)(retVal *Tensor) {
retVal, err := _EuclideanDist(x1, x2)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EuclideanDistOut(out *Tensor, x1 *Tensor, x2 *Tensor)(retVal *Tensor) {
retVal, err := _EuclideanDistOut(out, x1, x2)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) {
retVal, err := ts._FakeQuantizeLearnablePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffineOut(out *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) {
retVal, err := ts._FakeQuantizeLearnablePerChannelAffineOut(out, scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) {
retVal, err := ts._FakeQuantizeLearnablePerTensorAffine(scale, zeroPoint, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffineOut(out *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) {
retVal, err := ts._FakeQuantizeLearnablePerTensorAffineOut(out, scale, zeroPoint, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FakeQuantizePerTensorAffineCachemaskTensorQparams(scale, zeroPoint, fakeQuantEnabled, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FakeQuantizePerTensorAffineCachemaskTensorQparamsOut(out0 *Tensor, out1 *Tensor, scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FakeQuantizePerTensorAffineCachemaskTensorQparamsOut(out0, out1, scale, zeroPoint, fakeQuantEnabled, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FftC2c(dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2c(dim, normalization, forward, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2cOut(out, dim, normalization, forward, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2r(dim, normalization, lastDimSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2rOut(out, dim, normalization, lastDimSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftR2c(dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftR2c(dim, normalization, onesided, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftR2cOut(out, dim, normalization, onesided, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FillMemEffDropoutMask_(dropoutP float64, seed int64, offset int64)() {
err := ts._FillMemEffDropoutMask_(dropoutP, seed, offset)
if err != nil { log.Fatal(err) }
return
}
func Must_FlashAttentionBackward(gradOut *Tensor, query *Tensor, key *Tensor, value *Tensor, out *Tensor, logsumexp *Tensor, cumSeqQ *Tensor, cumSeqK *Tensor, maxQ int64, maxK int64, dropoutP float64, isCausal bool, philoxSeed *Tensor, philoxOffset *Tensor, scale []float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _FlashAttentionBackward(gradOut, query, key, value, out, logsumexp, cumSeqQ, cumSeqK, maxQ, maxK, dropoutP, isCausal, philoxSeed, philoxOffset, scale)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_Foobar(arg1 bool, arg2 bool, arg3 bool, del bool)(retVal *Tensor) {
retVal, err := ts._Foobar(arg1, arg2, arg3, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FoobarOut(out *Tensor, arg1 bool, arg2 bool, arg3 bool, del bool)(retVal *Tensor) {
retVal, err := ts._FoobarOut(out, arg1, arg2, arg3, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FunctionalAssertAsync(assertMsg string, depToken *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._FunctionalAssertAsync(assertMsg, depToken, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_FunctionalSymConstrainRange(size *Scalar, min []int64, max []int64, depToken *Tensor)(retVal *Tensor) {
retVal, err := _FunctionalSymConstrainRange(size, min, max, depToken)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_FunctionalSymConstrainRangeForSize(size *Scalar, min []int64, max []int64, depToken *Tensor)(retVal *Tensor) {
retVal, err := _FunctionalSymConstrainRangeForSize(size, min, max, depToken)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FusedDropout(p float64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FusedDropout(p, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FusedDropoutOut(out0 *Tensor, out1 *Tensor, p float64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FusedDropoutOut(out0, out1, p, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FusedMovingAvgObsFqHelper(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FusedMovingAvgObsFqHelperFunctional(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err := ts._FusedMovingAvgObsFqHelperFunctional(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5
}
func(ts *Tensor) Must_FusedMovingAvgObsFqHelperOut(out0 *Tensor, out1 *Tensor, observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FusedMovingAvgObsFqHelperOut(out0, out1, observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_FusedSdpChoice(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor, dropoutP float64, isCausal bool, scale []float64)(retVal int64) {
retVal, err := _FusedSdpChoice(query, key, value, attnMask, dropoutP, isCausal, scale)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FwPrimal(level int64, del bool)(retVal *Tensor) {
retVal, err := ts._FwPrimal(level, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FwPrimalCopy(level int64, del bool)(retVal *Tensor) {
retVal, err := ts._FwPrimalCopy(level, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FwPrimalCopyOut(out *Tensor, level int64, del bool)(retVal *Tensor) {
retVal, err := ts._FwPrimalCopyOut(out, level, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._GatherSparseBackward(dim, index, grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := _GridSampler2dCpuFallback(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _GridSampler2dCpuFallbackBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_GridSampler2dCpuFallbackOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := _GridSampler2dCpuFallbackOut(out, input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bool) {
retVal, err := ts._HasCompatibleShallowCopyType(from, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_HasSameStorageNumel(other *Tensor, del bool)(retVal bool) {
retVal, err := ts._HasSameStorageNumel(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_HistogramddFromBinCts(out *Tensor, bins []int64, rangeVals []float64, weight *Tensor, density bool, del bool)(retVal *Tensor) {
retVal, err := ts._HistogramddFromBinCts(out, bins, rangeVals, weight, density, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_HistogramddFromBinTensors(bins []*Tensor, weight *Tensor, density bool, del bool)(retVal *Tensor) {
retVal, err := ts._HistogramddFromBinTensors(bins, weight, density, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_HistogramddFromBinTensorsOut(out *Tensor, bins []*Tensor, weight *Tensor, density bool, del bool)(retVal *Tensor) {
retVal, err := ts._HistogramddFromBinTensorsOut(out, bins, weight, density, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IndexPutImpl(indices []*Tensor, values *Tensor, accumulate bool, unsafety bool, del bool)(retVal *Tensor) {
retVal, err := ts._IndexPutImpl(indices, values, accumulate, unsafety, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IndexPutImplOut(out *Tensor, indices []*Tensor, values *Tensor, accumulate bool, unsafety bool, del bool)(retVal *Tensor) {
retVal, err := ts._IndexPutImplOut(out, indices, values, accumulate, unsafety, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Indices(del bool)(retVal *Tensor) {
retVal, err := ts._Indices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IndicesCopy(del bool)(retVal *Tensor) {
retVal, err := ts._IndicesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IndicesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._IndicesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IntMm(mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._IntMm(mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IntMmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._IntMmOut(out, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IsAllTrue(del bool)(retVal *Tensor) {
retVal, err := ts._IsAllTrue(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IsAnyTrue(del bool)(retVal *Tensor) {
retVal, err := ts._IsAnyTrue(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IsZerotensor(del bool)(retVal bool) {
retVal, err := ts._IsZerotensor(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_LinalgDet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _LinalgDet(a)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_LinalgDetResult(result *Tensor, lU *Tensor, pivots *Tensor, a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _LinalgDetResult(result, lU, pivots, a)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_LinalgEigh(a *Tensor, uPLO string, computeV bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _LinalgEigh(a, uPLO, computeV)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_LinalgEighEigenvalues(eigenvalues *Tensor, eigenvectors *Tensor, a *Tensor, uPLO string, computeV bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _LinalgEighEigenvalues(eigenvalues, eigenvectors, a, uPLO, computeV)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_LinalgSlogdet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _LinalgSlogdet(a)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_LinalgSlogdetSign(sign *Tensor, logabsdet *Tensor, lU *Tensor, pivots *Tensor, a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _LinalgSlogdetSign(sign, logabsdet, lU, pivots, a)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_LinalgSolveEx(a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _LinalgSolveEx(a, b, left, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_LinalgSolveExResult(result *Tensor, lU *Tensor, pivots *Tensor, info *Tensor, a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _LinalgSolveExResult(result, lU, pivots, info, a, b, left, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_LinalgSvd(a *Tensor, fullMatrices bool, computeUv bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _LinalgSvd(a, fullMatrices, computeUv, driver)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, computeUv bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _LinalgSvdU(u, s, vh, a, fullMatrices, computeUv, driver)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._LogSoftmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) {
retVal, err := _LogSoftmaxBackwardData(gradOutput, output, dim, inputDtype)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) {
retVal, err := _LogSoftmaxBackwardDataOut(out, gradOutput, output, dim, inputDtype)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._LogSoftmaxOut(out, dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Logcumsumexp(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._Logcumsumexp(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._LogcumsumexpOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_LstmMps(input *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err := _LstmMps(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5
}
func Must_LstmMpsOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, out5 *Tensor, input *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err := _LstmMpsOut(out0, out1, out2, out3, out4, out5, input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5
}
func(ts *Tensor) Must_LuWithInfo(pivot bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._LuWithInfo(pivot, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_MakeDepToken(optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _MakeDepToken(optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_MakeDual(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor) {
retVal, err := _MakeDual(primal, tangent, level)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_MakeDualCopy(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor) {
retVal, err := _MakeDualCopy(primal, tangent, level)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_MakeDualCopyOut(out *Tensor, primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor) {
retVal, err := _MakeDualCopyOut(out, primal, tangent, level)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor) {
retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MakePerChannelQuantizedTensorOut(out *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor) {
retVal, err := ts._MakePerChannelQuantizedTensorOut(out, scale, zeroPoint, axis, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal *Tensor) {
retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MakePerTensorQuantizedTensorOut(out *Tensor, scale float64, zeroPoint int64, del bool)(retVal *Tensor) {
retVal, err := ts._MakePerTensorQuantizedTensorOut(out, scale, zeroPoint, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MaskedScale(mask *Tensor, scale float64, del bool)(retVal *Tensor) {
retVal, err := ts._MaskedScale(mask, scale, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MaskedScaleOut(out *Tensor, mask *Tensor, scale float64, del bool)(retVal *Tensor) {
retVal, err := ts._MaskedScaleOut(out, mask, scale, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MaskedSoftmax(mask *Tensor, dim []int64, maskType []int64, del bool)(retVal *Tensor) {
retVal, err := ts._MaskedSoftmax(mask, dim, maskType, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_MaskedSoftmaxBackward(gradOutput *Tensor, output *Tensor, mask *Tensor, dim []int64)(retVal *Tensor) {
retVal, err := _MaskedSoftmaxBackward(gradOutput, output, mask, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_MaskedSoftmaxBackwardOut(out *Tensor, gradOutput *Tensor, output *Tensor, mask *Tensor, dim []int64)(retVal *Tensor) {
retVal, err := _MaskedSoftmaxBackwardOut(out, gradOutput, output, mask, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MaskedSoftmaxOut(out *Tensor, mask *Tensor, dim []int64, maskType []int64, del bool)(retVal *Tensor) {
retVal, err := ts._MaskedSoftmaxOut(out, mask, dim, maskType, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MkldnnReshape(shape []int64, del bool)(retVal *Tensor) {
retVal, err := ts._MkldnnReshape(shape, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MkldnnReshapeOut(out *Tensor, shape []int64, del bool)(retVal *Tensor) {
retVal, err := ts._MkldnnReshapeOut(out, shape, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts._MkldnnTranspose(dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64)() {
err := ts._MkldnnTranspose_(dim0, dim1)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_MkldnnTransposeOut(out *Tensor, dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts._MkldnnTransposeOut(out, dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MpsConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts._MpsConvolution(weight, bias, padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MpsConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts._MpsConvolutionOut(out, weight, bias, padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MpsConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts._MpsConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MpsConvolutionTransposeOut(out *Tensor, weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts._MpsConvolutionTransposeOut(out, weight, padding, outputPadding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NativeBatchNormLegit(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _NativeBatchNormLegit(input, weight, bias, runningMean, runningVar, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_NativeBatchNormLegitFunctional(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, err := _NativeBatchNormLegitFunctional(input, weight, bias, runningMean, runningVar, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4
}
func Must_NativeBatchNormLegitNoStats(input *Tensor, weight *Tensor, bias *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _NativeBatchNormLegitNoStats(input, weight, bias, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_NativeBatchNormLegitNoStatsOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _NativeBatchNormLegitNoStatsOut(out, saveMean, saveInvstd, input, weight, bias, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_NativeBatchNormLegitNoTraining(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _NativeBatchNormLegitNoTraining(input, weight, bias, runningMean, runningVar, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_NativeBatchNormLegitNoTrainingOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _NativeBatchNormLegitNoTrainingOut(out0, out1, out2, input, weight, bias, runningMean, runningVar, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_NativeBatchNormLegitOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _NativeBatchNormLegitOut(out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_NativeMultiHeadAttention(query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor, needWeights bool, averageAttnWeights bool, maskType []int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _NativeMultiHeadAttention(query, key, value, embedDim, numHead, qkvWeight, qkvBias, projWeight, projBias, mask, needWeights, averageAttnWeights, maskType)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_NativeMultiHeadAttentionOut(out0 *Tensor, out1 *Tensor, query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor, needWeights bool, averageAttnWeights bool, maskType []int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _NativeMultiHeadAttentionOut(out0, out1, query, key, value, embedDim, numHead, qkvWeight, qkvBias, projWeight, projBias, mask, needWeights, averageAttnWeights, maskType)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_NegView(del bool)(retVal *Tensor) {
retVal, err := ts._NegView(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NegViewCopy(del bool)(retVal *Tensor) {
retVal, err := ts._NegViewCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NegViewCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._NegViewCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NestedFromPadded(padded *Tensor, cpuNestedShapeExample *Tensor, fuseTransform0213 bool)(retVal *Tensor) {
retVal, err := _NestedFromPadded(padded, cpuNestedShapeExample, fuseTransform0213)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NestedFromPaddedAndNestedExample(padded *Tensor, ntExample *Tensor)(retVal *Tensor) {
retVal, err := _NestedFromPaddedAndNestedExample(padded, ntExample)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NestedFromPaddedAndNestedExampleOut(out *Tensor, padded *Tensor, ntExample *Tensor)(retVal *Tensor) {
retVal, err := _NestedFromPaddedAndNestedExampleOut(out, padded, ntExample)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NestedFromPaddedOut(out *Tensor, padded *Tensor, cpuNestedShapeExample *Tensor, fuseTransform0213 bool)(retVal *Tensor) {
retVal, err := _NestedFromPaddedOut(out, padded, cpuNestedShapeExample, fuseTransform0213)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NestedSelectBackward(gradOutput *Tensor, dim int64, index int64, del bool)(retVal *Tensor) {
retVal, err := ts._NestedSelectBackward(gradOutput, dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NestedSumBackward(grad *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts._NestedSumBackward(grad, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NestedViewFromBuffer(nestedSize *Tensor, nestedStrides *Tensor, offsets *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._NestedViewFromBuffer(nestedSize, nestedStrides, offsets, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NestedViewFromBufferCopy(nestedSize *Tensor, nestedStrides *Tensor, offsets *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._NestedViewFromBufferCopy(nestedSize, nestedStrides, offsets, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NestedViewFromBufferCopyOut(out *Tensor, nestedSize *Tensor, nestedStrides *Tensor, offsets *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._NestedViewFromBufferCopyOut(out, nestedSize, nestedStrides, offsets, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NewZerosWithSameFeatureMeta(other *Tensor, selfNumBatchDims int64, del bool)(retVal *Tensor) {
retVal, err := ts._NewZerosWithSameFeatureMeta(other, selfNumBatchDims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_NewZerosWithSameFeatureMetaOut(out *Tensor, other *Tensor, selfNumBatchDims int64, del bool)(retVal *Tensor) {
retVal, err := ts._NewZerosWithSameFeatureMetaOut(out, other, selfNumBatchDims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NnpackAvailable()(retVal bool) {
retVal, err := _NnpackAvailable()
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor) {
retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NnpackSpatialConvolutionOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor) {
retVal, err := _NnpackSpatialConvolutionOut(out, input, weight, bias, padding, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Nnz(del bool)(retVal int64) {
retVal, err := ts._Nnz(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _PackPaddedSequence(input, lengths, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool)(retVal *Tensor) {
retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_PackPaddedSequenceOut(out0 *Tensor, out1 *Tensor, input *Tensor, lengths *Tensor, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _PackPaddedSequenceOut(out0, out1, input, lengths, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
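// packPaddedSketch is a hand-written usage sketch, not generated code, showing
// how the two-value Must_* wrappers are consumed: both results come back
// directly, and any failure aborts the process via log.Fatal. input is assumed
// to be a padded batch with the batch dimension first and lengths a 1-D tensor
// of per-sequence lengths, matching the underlying pack_padded_sequence op.
func packPaddedSketch(input *Tensor, lengths *Tensor) (packedData *Tensor, batchSizes *Tensor) {
return Must_PackPaddedSequence(input, lengths, true)
}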
func(ts *Tensor) Must_PadCircular(pad []int64, del bool)(retVal *Tensor) {
retVal, err := ts._PadCircular(pad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_PadEnum(pad []int64, mode int64, value []float64, del bool)(retVal *Tensor) {
retVal, err := ts._PadEnum(pad, mode, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _PadPackedSequence(data, batchSizes, batchFirst, paddingValue, totalLength)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._PdistBackward(grad, p, pdist, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_PdistBackwardOut(out *Tensor, grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._PdistBackwardOut(out, grad, p, pdist, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_PinMemory(device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts._PinMemory(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_PinMemoryOut(out *Tensor, device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts._PinMemoryOut(out, device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_PreluKernel(weight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._PreluKernel(weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_PreluKernelBackward(gradOutput *Tensor, weight *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._PreluKernelBackward(gradOutput, weight, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool)(retVal *Tensor) {
retVal, err := ts._RemoveBatchDim(level, batchSize, outDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ReshapeAlias(size []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ReshapeAlias(size, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ReshapeAliasCopy(size []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ReshapeAliasCopy(size, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ReshapeAliasCopyOut(out *Tensor, size []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ReshapeAliasCopyOut(out, size, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ReshapeCopy(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ReshapeCopy(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ReshapeFromTensor(shape *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._ReshapeFromTensor(shape, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ResizeOutput(size []int64, device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts._ResizeOutput(size, device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ResizeOutput_(size []int64, device gotch.Device)() {
err := ts._ResizeOutput_(size, device)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_ResizeOutputOut(out *Tensor, size []int64, device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts._ResizeOutputOut(out, size, device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _RowwisePrune(weight, mask, compressedIndicesDtype)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_SampleDirichlet(del bool)(retVal *Tensor) {
retVal, err := ts._SampleDirichlet(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SampleDirichletOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SampleDirichletOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SaturateWeightToFp16(weight *Tensor)(retVal *Tensor) {
retVal, err := _SaturateWeightToFp16(weight)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ScaledDotProductAttentionMath(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor, dropoutP float64, isCausal bool, dropoutMask *Tensor, scale []float64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _ScaledDotProductAttentionMath(query, key, value, attnMask, dropoutP, isCausal, dropoutMask, scale)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_ScaledDotProductEfficientAttention(query *Tensor, key *Tensor, value *Tensor, attnBias *Tensor, computeLogSumexp bool, dropoutP float64, isCausal bool, scale []float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _ScaledDotProductEfficientAttention(query, key, value, attnBias, computeLogSumexp, dropoutP, isCausal, scale)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_ScaledDotProductFlashAttentionBackward(gradOut *Tensor, query *Tensor, key *Tensor, value *Tensor, out *Tensor, logsumexp *Tensor, cumSeqQ *Tensor, cumSeqK *Tensor, maxQ int64, maxK int64, dropoutP float64, isCausal bool, philoxSeed *Tensor, philoxOffset *Tensor, scale []float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _ScaledDotProductFlashAttentionBackward(gradOut, query, key, value, out, logsumexp, cumSeqQ, cumSeqK, maxQ, maxK, dropoutP, isCausal, philoxSeed, philoxOffset, scale)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_ScaledMm(mat2 *Tensor, bias *Tensor, outDtype gotch.DType, scaleA *Tensor, scaleB *Tensor, scaleResult *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._ScaledMm(mat2, bias, outDtype, scaleA, scaleB, scaleResult, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_ScaledMmOut(out *Tensor, outAmax *Tensor, mat2 *Tensor, bias *Tensor, outDtype gotch.DType, scaleA *Tensor, scaleB *Tensor, scaleResult *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._ScaledMmOut(out, outAmax, mat2, bias, outDtype, scaleA, scaleB, scaleResult, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_ScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, includeSelf bool, del bool)(retVal *Tensor) {
retVal, err := ts._ScatterReduce(dim, index, src, reduce, includeSelf, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string, includeSelf bool)() {
err := ts._ScatterReduce_(dim, index, src, reduce, includeSelf)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_ScatterReduceTwoOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, includeSelf bool, del bool)(retVal *Tensor) {
retVal, err := ts._ScatterReduceTwoOut(out, dim, index, src, reduce, includeSelf, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, offsets *Tensor, axis int64, initial *Scalar)(retVal *Tensor) {
retVal, err := _SegmentReduceBackward(grad, output, data, reduce, lengths, offsets, axis, initial)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SegmentReduceBackwardOut(out *Tensor, grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, offsets *Tensor, axis int64, initial *Scalar)(retVal *Tensor) {
retVal, err := _SegmentReduceBackwardOut(out, grad, output, data, reduce, lengths, offsets, axis, initial)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ShapeAsTensor(del bool)(retVal *Tensor) {
retVal, err := ts._ShapeAsTensor(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SlowConv2dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._SlowConv2dBackward(gradInput, gradWeight, gradBias, gradOutput, weight, kernelSize, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _SobolEngineDraw(quasi, n, sobolstate, dimension, numGenerated, dtype)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64)() {
err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_SobolEngineInitializeState_(dimension int64)() {
err := ts._SobolEngineInitializeState_(dimension)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_SobolEngineScramble_(ltm *Tensor, dimension int64)() {
err := ts._SobolEngineScramble_(ltm, dimension)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._Softmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) {
retVal, err := _SoftmaxBackwardData(gradOutput, output, dim, inputDtype)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, inputDtype gotch.DType)(retVal *Tensor) {
retVal, err := _SoftmaxBackwardDataOut(gradInput, gradOutput, output, dim, inputDtype)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SoftmaxOut(out, dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
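// softmaxSketch is a hand-written usage sketch, not generated code: it assumes
// logits is a floating-point tensor and applies the internal softmax kernel
// over the last dimension. halfToFloat=false keeps the input dtype, and
// del=true frees logits once the result exists (per the del convention used
// by these wrappers).
func softmaxSketch(logits *Tensor) *Tensor {
return logits.Must_Softmax(-1, false, true)
}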
func(ts *Tensor) Must_SparseAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SparseAddmm(mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SparseAddmmOut(out, mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseBroadcastTo(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseBroadcastTo(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseBroadcastToCopy(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseBroadcastToCopy(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseBroadcastToCopyOut(out *Tensor, size []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseBroadcastToCopyOut(out, size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseBscTensorUnsafe(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseBscTensorUnsafe(ccolIndices, rowIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseBsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseBsrTensorUnsafe(crowIndices, colIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCompressedTensorUnsafe(compressedIndices *Tensor, plainIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCompressedTensorUnsafe(compressedIndices, plainIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, isCoalesced bool)(retVal *Tensor) {
retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice, isCoalesced)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device, isCoalesced bool)(retVal *Tensor) {
retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice, isCoalesced)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorWithDimsAndTensorsOut(out *Tensor, sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, isCoalesced bool)(retVal *Tensor) {
retVal, err := _SparseCooTensorWithDimsAndTensorsOut(out, sparseDim, denseDim, size, indices, values, isCoalesced)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorWithDimsOut(out *Tensor, sparseDim int64, denseDim int64, size []int64)(retVal *Tensor) {
retVal, err := _SparseCooTensorWithDimsOut(out, sparseDim, denseDim, size)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCscTensorUnsafe(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCscTensorUnsafe(ccolIndices, rowIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseCsrProd(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseCsrProd(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseCsrProdDimDtypeOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseCsrProdDimDtypeOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseCsrSum(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseCsrSum(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseCsrSumDimDtypeOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseCsrSumDimDtypeOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCsrTensorUnsafe(crowIndices, colIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmaxBackwardData(gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmaxBackwardDataOut(out, gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmaxInt(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmaxOut(out, dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseMaskProjection(mask *Tensor, accumulateMatches bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseMaskProjection(mask, accumulateMatches, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseMaskProjectionOut(out *Tensor, mask *Tensor, accumulateMatches bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseMaskProjectionOut(out, mask, accumulateMatches, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseMm(sparse *Tensor, dense *Tensor)(retVal *Tensor) {
retVal, err := _SparseMm(sparse, dense)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseMmReduce(sparse *Tensor, dense *Tensor, reduce string)(retVal *Tensor) {
retVal, err := _SparseMmReduce(sparse, dense, reduce)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseMmReduceImpl(other *Tensor, reduce string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._SparseMmReduceImpl(other, reduce, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_SparseSemiStructuredLinear(input *Tensor, weight *Tensor, meta *Tensor, bias *Tensor, activation string)(retVal *Tensor) {
retVal, err := _SparseSemiStructuredLinear(input, weight, meta, bias, activation)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmaxBackwardData(gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmaxBackwardDataOut(out, gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmaxInt(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmaxOut(out, dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSparseMatmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSparseMatmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSparseMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSparseMatmulOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSum(del bool)(retVal *Tensor) {
retVal, err := ts._SparseSum(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumBackward(grad *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumBackward(grad, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumBackwardOut(out *Tensor, grad *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumBackwardOut(out, grad, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumDim(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumDim(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumDimDtype(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumDimOut(out *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumDimOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumDtype(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumDtype(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Spdiags(diagonals *Tensor, offsets *Tensor, shape []int64, layout Layout)(retVal *Tensor) {
retVal, err := _Spdiags(diagonals, offsets, shape, layout)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SpdiagsOut(out *Tensor, diagonals *Tensor, offsets *Tensor, shape []int64, layout Layout)(retVal *Tensor) {
retVal, err := _SpdiagsOut(out, diagonals, offsets, shape, layout)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Stack(tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := _Stack(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_StackOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := _StackOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
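// stackSketch is a hand-written usage sketch, not generated code: Must_Stack
// takes a slice of tensors and a dimension and returns the stacked result,
// aborting via log.Fatal on error. a and b are assumed to share a shape.
func stackSketch(a *Tensor, b *Tensor) *Tensor {
return Must_Stack([]*Tensor{a, b}, 0)
}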
func(ts *Tensor) Must_StandardGamma(del bool)(retVal *Tensor) {
retVal, err := ts._StandardGamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_StandardGammaGrad(output *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._StandardGammaGrad(output, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_StandardGammaGradOut(out *Tensor, output *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._StandardGammaGradOut(out, output, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_StandardGammaOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._StandardGammaOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestAmbiguousDefaults(dummy *Tensor, a int64, b int64)(retVal *Tensor) {
retVal, err := _TestAmbiguousDefaults(dummy, a, b)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string)(retVal *Tensor) {
retVal, err := _TestAmbiguousDefaultsB(dummy, a, b)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestAutogradMultipleDispatch(del bool)(retVal *Tensor) {
retVal, err := ts._TestAutogradMultipleDispatch(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestAutogradMultipleDispatchFullcoverageOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._TestAutogradMultipleDispatchFullcoverageOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestAutogradMultipleDispatchNtonly(b bool, del bool)(retVal *Tensor) {
retVal, err := ts._TestAutogradMultipleDispatchNtonly(b, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestAutogradMultipleDispatchView(del bool)(retVal *Tensor) {
retVal, err := ts._TestAutogradMultipleDispatchView(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestAutogradMultipleDispatchViewCopy(del bool)(retVal *Tensor) {
retVal, err := ts._TestAutogradMultipleDispatchViewCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestAutogradMultipleDispatchViewCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._TestAutogradMultipleDispatchViewCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestCheckTensor(del bool)(retVal *Tensor) {
retVal, err := ts._TestCheckTensor(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestFunctorchFallback(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._TestFunctorchFallback(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestFunctorchFallbackOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._TestFunctorchFallbackOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalFilledIntlist(values *Tensor, addends []int64)(retVal *Tensor) {
retVal, err := _TestOptionalFilledIntlist(values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalFilledIntlistOut(out *Tensor, values *Tensor, addends []int64)(retVal *Tensor) {
retVal, err := _TestOptionalFilledIntlistOut(out, values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalFloatlist(values *Tensor, addends []float64)(retVal *Tensor) {
retVal, err := _TestOptionalFloatlist(values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalFloatlistOut(out *Tensor, values *Tensor, addends []float64)(retVal *Tensor) {
retVal, err := _TestOptionalFloatlistOut(out, values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalIntlist(values *Tensor, addends []int64)(retVal *Tensor) {
retVal, err := _TestOptionalIntlist(values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalIntlistOut(out *Tensor, values *Tensor, addends []int64)(retVal *Tensor) {
retVal, err := _TestOptionalIntlistOut(out, values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestSerializationSubcmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._TestSerializationSubcmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestStringDefault(dummy *Tensor, a string, b string)(retVal *Tensor) {
retVal, err := _TestStringDefault(dummy, a, b)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestWarnInAutograd(del bool)(retVal *Tensor) {
retVal, err := ts._TestWarnInAutograd(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestWarnInAutogradOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._TestWarnInAutogradOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._ToCopy(optionsKind, optionsDevice, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToCopyOut(out *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._ToCopyOut(out, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToDense(dtype gotch.DType, maskedGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts._ToDense(dtype, maskedGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToDenseOut(out *Tensor, dtype gotch.DType, maskedGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts._ToDenseOut(out, dtype, maskedGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparse(layout Layout, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparse(layout, blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseBsc(blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseBsc(blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseBscOut(out *Tensor, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseBscOut(out, blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseBsr(blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseBsr(blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseBsrOut(out *Tensor, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseBsrOut(out, blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseCsc(denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseCsc(denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseCscOut(out *Tensor, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseCscOut(out, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseCsr(denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseCsr(denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseCsrOut(out *Tensor, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseCsrOut(out, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseOut(out *Tensor, layout Layout, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseOut(out, layout, blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ToSparseSemiStructured(dense *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _ToSparseSemiStructured(dense)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_ToSparseSparseDim(sparseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseSparseDim(sparseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToSparseSparseDimOut(out *Tensor, sparseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts._ToSparseSparseDimOut(out, sparseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TransformBiasRescaleQkv(qkv *Tensor, qkvBias *Tensor, numHeads int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _TransformBiasRescaleQkv(qkv, qkvBias, numHeads)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_TransformBiasRescaleQkvOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, qkv *Tensor, qkvBias *Tensor, numHeads int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := _TransformBiasRescaleQkvOut(out0, out1, out2, qkv, qkvBias, numHeads)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_TransformerEncoderLayerFwd(src *Tensor, embedDim int64, numHeads int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, useGelu bool, normFirst bool, eps float64, normWeight1 *Tensor, normBias1 *Tensor, normWeight2 *Tensor, normBias2 *Tensor, ffnWeight1 *Tensor, ffnBias1 *Tensor, ffnWeight2 *Tensor, ffnBias2 *Tensor, mask *Tensor, maskType []int64)(retVal *Tensor) {
retVal, err := _TransformerEncoderLayerFwd(src, embedDim, numHeads, qkvWeight, qkvBias, projWeight, projBias, useGelu, normFirst, eps, normWeight1, normBias1, normWeight2, normBias2, ffnWeight1, ffnBias1, ffnWeight2, ffnBias2, mask, maskType)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TransformerEncoderLayerFwdOut(out *Tensor, src *Tensor, embedDim int64, numHeads int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, useGelu bool, normFirst bool, eps float64, normWeight1 *Tensor, normBias1 *Tensor, normWeight2 *Tensor, normBias2 *Tensor, ffnWeight1 *Tensor, ffnBias1 *Tensor, ffnWeight2 *Tensor, ffnBias2 *Tensor, mask *Tensor, maskType []int64)(retVal *Tensor) {
retVal, err := _TransformerEncoderLayerFwdOut(out, src, embedDim, numHeads, qkvWeight, qkvBias, projWeight, projBias, useGelu, normFirst, eps, normWeight1, normBias1, normWeight2, normBias2, ffnWeight1, ffnBias1, ffnWeight2, ffnBias2, mask, maskType)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor) {
retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TrilinearOut(out *Tensor, i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor) {
retVal, err := _TrilinearOut(out, i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TritonMultiHeadAttention(query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := _TritonMultiHeadAttention(query, key, value, embedDim, numHead, qkvWeight, qkvBias, projWeight, projBias, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TritonMultiHeadAttentionOut(out *Tensor, query *Tensor, key *Tensor, value *Tensor, embedDim int64, numHead int64, qkvWeight *Tensor, qkvBias *Tensor, projWeight *Tensor, projBias *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := _TritonMultiHeadAttentionOut(out, query, key, value, embedDim, numHead, qkvWeight, qkvBias, projWeight, projBias, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TritonScaledDotAttention(q *Tensor, k *Tensor, v *Tensor, dropoutP float64)(retVal *Tensor) {
retVal, err := _TritonScaledDotAttention(q, k, v, dropoutP)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TritonScaledDotAttentionOut(out *Tensor, q *Tensor, k *Tensor, v *Tensor, dropoutP float64)(retVal *Tensor) {
retVal, err := _TritonScaledDotAttentionOut(out, q, k, v, dropoutP)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Unique(sorted bool, returnInverse bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._Unique(sorted, returnInverse, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._Unique2(sorted, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_Unique2Out(out0 *Tensor, out1 *Tensor, out2 *Tensor, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._Unique2Out(out0, out1, out2, sorted, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_UniqueOut(out0 *Tensor, out1 *Tensor, sorted bool, returnInverse bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._UniqueOut(out0, out1, sorted, returnInverse, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
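// uniqueSketch is a hand-written usage sketch, not generated code: Must_Unique2
// returns the sorted unique values together with inverse indices and counts in
// a single call. del=false leaves the input tensor alive.
func uniqueSketch(x *Tensor) (values *Tensor, inverse *Tensor, counts *Tensor) {
return x.Must_Unique2(true, true, true, false)
}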
func Must_UnpackDual(dual *Tensor, level int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _UnpackDual(dual, level)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_UnsafeIndex(indices []*Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._UnsafeIndex(indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UnsafeIndexPut(indices []*Tensor, values *Tensor, accumulate bool, del bool)(retVal *Tensor) {
retVal, err := ts._UnsafeIndexPut(indices, values, accumulate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UnsafeView(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts._UnsafeView(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UnsafeViewOut(out *Tensor, size []int64, del bool)(retVal *Tensor) {
retVal, err := ts._UnsafeViewOut(out, size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleBicubic2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleBicubic2dAa(outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleBicubic2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleBicubic2dAaBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleBicubic2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleBicubic2dAaBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleBicubic2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleBicubic2dAaOut(out, outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleBicubic2dAaVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor) {
retVal, err := _UpsampleBicubic2dAaVec(input, outputSize, alignCorners, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleBilinear2dAa(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleBilinear2dAa(outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleBilinear2dAaBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleBilinear2dAaBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleBilinear2dAaBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleBilinear2dAaBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleBilinear2dAaOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleBilinear2dAaOut(out, outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleBilinear2dAaVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor) {
retVal, err := _UpsampleBilinear2dAaVec(input, outputSize, alignCorners, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleNearestExact1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleNearestExact1d(outputSize, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact1dBackward(gradOutput, outputSize, inputSize, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleNearestExact1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleNearestExact1dOut(out, outputSize, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact1dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact1dVec(input, outputSize, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleNearestExact2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleNearestExact2d(outputSize, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleNearestExact2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleNearestExact2dOut(out, outputSize, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact2dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact2dVec(input, outputSize, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleNearestExact3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleNearestExact3d(outputSize, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_UpsampleNearestExact3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts._UpsampleNearestExact3dOut(out, outputSize, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UpsampleNearestExact3dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor) {
retVal, err := _UpsampleNearestExact3dVec(input, outputSize, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64)(retVal bool) {
retVal, err := _UseCudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UseCudnnCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64)(retVal bool) {
retVal, err := _UseCudnnCtcLossTensor(logProbs, targets, inputLengths, targetLengths, blank)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UseCudnnRnnFlattenWeight()(retVal bool) {
retVal, err := _UseCudnnRnnFlattenWeight()
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Values(del bool)(retVal *Tensor) {
retVal, err := ts._Values(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ValuesCopy(del bool)(retVal *Tensor) {
retVal, err := ts._ValuesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ValuesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._ValuesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Version(del bool)(retVal int64) {
retVal, err := ts._Version(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_WeightNorm(v *Tensor, g *Tensor, dim int64)(retVal *Tensor) {
retVal, err := _WeightNorm(v, g, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormDifferentiableBackward(gradW, savedV, savedG, savedNorms, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_WeightNormInterface(v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormInterface(v, g, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_WeightNormInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormInterfaceBackward(gradW, savedV, savedG, savedNorms, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_WeightNormInterfaceBackwardOut(out0 *Tensor, out1 *Tensor, gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormInterfaceBackwardOut(out0, out1, gradW, savedV, savedG, savedNorms, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_WeightNormInterfaceOut(out0 *Tensor, out1 *Tensor, v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormInterfaceOut(out0, out1, v, g, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAbs(del bool)(retVal *Tensor) {
retVal, err := ts.Abs(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAbs_()() {
err := ts.Abs_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAbsOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AbsOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAbsolute(del bool)(retVal *Tensor) {
retVal, err := ts.Absolute(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAbsolute_()() {
err := ts.Absolute_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAbsoluteOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AbsoluteOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcos(del bool)(retVal *Tensor) {
retVal, err := ts.Acos(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcos_()() {
err := ts.Acos_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAcosOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AcosOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcosh(del bool)(retVal *Tensor) {
retVal, err := ts.Acosh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcosh_()() {
err := ts.Acosh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAcoshOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AcoshOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool1d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool3d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool3dBackward(gradInput, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool1d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool1d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool2d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool2dBackwardGradInput(gradInput, gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool2dOut(out, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool3d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool3d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool3dBackwardGradInput(gradInput, gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool3dOut(out, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdd(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Add(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdd_(other *Tensor)() {
err := ts.Add_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.AddScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddScalar_(other *Scalar)() {
err := ts.AddScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.AddScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addbmm(batch1, batch2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddbmm_(batch1 *Tensor, batch2 *Tensor)() {
err := ts.Addbmm_(batch1, batch2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddbmmOut(out, batch1, batch2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addcdiv(tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcdiv_(tensor1 *Tensor, tensor2 *Tensor)() {
err := ts.Addcdiv_(tensor1, tensor2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcmul(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addcmul(tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcmul_(tensor1 *Tensor, tensor2 *Tensor)() {
err := ts.Addcmul_(tensor1, tensor2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addmm(mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmm_(mat1 *Tensor, mat2 *Tensor)() {
err := ts.Addmm_(mat1, mat2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddmmOut(out, mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmv(mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addmv(mat, vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmv_(mat *Tensor, vec *Tensor)() {
err := ts.Addmv_(mat, vec)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddmvOut(out, mat, vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddr(vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addr(vec1, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddr_(vec1 *Tensor, vec2 *Tensor)() {
err := ts.Addr_(vec1, vec2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddrOut(out, vec1, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdjoint(del bool)(retVal *Tensor) {
retVal, err := ts.Adjoint(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustAffineGridGenerator(theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor) {
retVal, err := AffineGridGenerator(theta, size, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustAffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool)(retVal *Tensor) {
retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustAffineGridGeneratorOut(out *Tensor, theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor) {
retVal, err := AffineGridGeneratorOut(out, theta, size, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAlias(del bool)(retVal *Tensor) {
retVal, err := ts.Alias(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAliasCopy(del bool)(retVal *Tensor) {
retVal, err := ts.AliasCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAliasCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AliasCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAlignAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AlignAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAll(del bool)(retVal *Tensor) {
retVal, err := ts.All(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllAllOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AllAllOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllDim(dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AllDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AllOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal bool) {
retVal, err := ts.Allclose(other, rtol, atol, equalNan, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := AlphaDropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAlphaDropout_(p float64, train bool)() {
err := ts.AlphaDropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Amax(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AmaxOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Amin(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AminOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAminmax(dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Aminmax(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AminmaxOut(min, max, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAngle(del bool)(retVal *Tensor) {
retVal, err := ts.Angle(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAngleOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AngleOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAny(del bool)(retVal *Tensor) {
retVal, err := ts.Any(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAnyAllOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AnyAllOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAnyDim(dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AnyDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAnyOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AnyOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Arange(end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := ArangeStart(start, end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArangeStartStep(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := ArangeStartStep(start, end, step, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccos(del bool)(retVal *Tensor) {
retVal, err := ts.Arccos(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccos_()() {
err := ts.Arccos_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArccosOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArccosOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccosh(del bool)(retVal *Tensor) {
retVal, err := ts.Arccosh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccosh_()() {
err := ts.Arccosh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArccoshOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArccoshOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsin(del bool)(retVal *Tensor) {
retVal, err := ts.Arcsin(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsin_()() {
err := ts.Arcsin_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArcsinOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArcsinOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsinh(del bool)(retVal *Tensor) {
retVal, err := ts.Arcsinh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsinh_()() {
err := ts.Arcsinh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArcsinhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArcsinhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctan(del bool)(retVal *Tensor) {
retVal, err := ts.Arctan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctan2(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Arctan2(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctan2_(other *Tensor)() {
err := ts.Arctan2_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArctan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Arctan2Out(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctan_()() {
err := ts.Arctan_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArctanOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArctanOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctanh(del bool)(retVal *Tensor) {
retVal, err := ts.Arctanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctanh_()() {
err := ts.Arctanh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArctanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArctanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Argmax(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.ArgmaxOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Argmin(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.ArgminOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgsort(dim int64, descending bool, del bool)(retVal *Tensor) {
retVal, err := ts.Argsort(dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgsortStable(stable bool, dim int64, descending bool, del bool)(retVal *Tensor) {
retVal, err := ts.ArgsortStable(stable, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgsortStableOut(out *Tensor, stable bool, dim int64, descending bool, del bool)(retVal *Tensor) {
retVal, err := ts.ArgsortStableOut(out, stable, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgwhere(del bool)(retVal *Tensor) {
retVal, err := ts.Argwhere(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsStrided(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AsStrided(size, stride, storageOffset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset []int64)() {
err := ts.AsStrided_(size, stride, storageOffset)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAsStridedCopy(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AsStridedCopy(size, stride, storageOffset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsStridedCopyOut(out *Tensor, size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AsStridedCopyOut(out, size, stride, storageOffset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsStridedScatter(src *Tensor, size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AsStridedScatter(src, size, stride, storageOffset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsStridedScatterOut(out *Tensor, src *Tensor, size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AsStridedScatterOut(out, src, size, stride, storageOffset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsin(del bool)(retVal *Tensor) {
retVal, err := ts.Asin(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsin_()() {
err := ts.Asin_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAsinOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AsinOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsinh(del bool)(retVal *Tensor) {
retVal, err := ts.Asinh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsinh_()() {
err := ts.Asinh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAsinhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AsinhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan(del bool)(retVal *Tensor) {
retVal, err := ts.Atan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan2(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Atan2(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan2_(other *Tensor)() {
err := ts.Atan2_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAtan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Atan2Out(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan_()() {
err := ts.Atan_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAtanOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AtanOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtanh(del bool)(retVal *Tensor) {
retVal, err := ts.Atanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtanh_()() {
err := ts.Atanh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAtanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AtanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast1d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast1d(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast2d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast2d(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast3d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast3d(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBaddbmm(batch1 *Tensor, batch2 *Tensor, beta *Scalar, alpha *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Baddbmm(batch1, batch2, beta, alpha, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBaddbmm_(batch1 *Tensor, batch2 *Tensor)() {
err := ts.Baddbmm_(batch1, batch2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BaddbmmOut(out, batch1, batch2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBartlettWindowOut(out *Tensor, windowLength int64)(retVal *Tensor) {
retVal, err := BartlettWindowOut(out, windowLength)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BartlettWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBartlettWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor) {
retVal, err := BartlettWindowPeriodicOut(out, windowLength, periodic)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, sumDy *Tensor, sumDyXmu *Tensor, count *Tensor)(retVal *Tensor) {
retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, sumDy, sumDyXmu, count)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormBackwardElemtOut(out *Tensor, gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, sumDy *Tensor, sumDyXmu *Tensor, count *Tensor)(retVal *Tensor) {
retVal, err := BatchNormBackwardElemtOut(out, gradOut, input, mean, invstd, weight, sumDy, sumDyXmu, count)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := BatchNormBackwardReduce(gradOut, input, mean, invstd, weight, inputG, weightG, biasG)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustBatchNormBackwardReduceOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := BatchNormBackwardReduceOut(out0, out1, out2, out3, gradOut, input, mean, invstd, weight, inputG, weightG, biasG)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustBatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) {
retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) {
retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormGatherStats(input, mean, invstd, runningMean, runningVar, momentum, eps, count)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormGatherStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormGatherStatsOut(out0, out1, input, mean, invstd, runningMean, runningVar, momentum, eps, count)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormGatherStatsWithCounts(input, mean, invstd, runningMean, runningVar, momentum, eps, counts)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormGatherStatsWithCountsOut(out0 *Tensor, out1 *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormGatherStatsWithCountsOut(out0, out1, input, mean, invstd, runningMean, runningVar, momentum, eps, counts)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormStats(input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormStats(input, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormStatsOut(out0, out1, input, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormUpdateStats(input, runningMean, runningVar, momentum)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormUpdateStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormUpdateStatsOut(out0, out1, input, runningMean, runningVar, momentum)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustBernoulli(del bool)(retVal *Tensor) {
retVal, err := ts.Bernoulli(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBernoulli_(p *Tensor)() {
err := ts.Bernoulli_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBernoulliFloat_(p float64)() {
err := ts.BernoulliFloat_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBernoulliP(p float64, del bool)(retVal *Tensor) {
retVal, err := ts.BernoulliP(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBernoulliTensor(p *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BernoulliTensor(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := Bilinear(input1, input2, weight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyBackwardGradInput(gradInput, gradOutput, target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyWithLogitsOut(out *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyWithLogitsOut(out, target, weight, posWeight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBincount(weights *Tensor, minlength int64, del bool)(retVal *Tensor) {
retVal, err := ts.Bincount(weights, minlength, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBincountOut(out *Tensor, weights *Tensor, minlength int64, del bool)(retVal *Tensor) {
retVal, err := ts.BincountOut(out, weights, minlength, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBinomial(count *Tensor, prob *Tensor)(retVal *Tensor) {
retVal, err := Binomial(count, prob)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBinomialOut(out *Tensor, count *Tensor, prob *Tensor)(retVal *Tensor) {
retVal, err := BinomialOut(out, count, prob)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAnd(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAnd(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAnd_(other *Scalar)() {
err := ts.BitwiseAnd_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseAndScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAndScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseAndScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseAndScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseAndScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseAndScalarTensorOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAndTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAndTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAndTensor_(other *Tensor)() {
err := ts.BitwiseAndTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseAndTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAndTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShift(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShift(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShift_(other *Tensor)() {
err := ts.BitwiseLeftShift_(other)
if err != nil { log.Fatal(err) }
return
}
func MustBitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseLeftShiftScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseLeftShiftScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseLeftShiftScalarTensorOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShiftTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShiftTensorScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShiftTensorScalar_(other *Scalar)() {
err := ts.BitwiseLeftShiftTensorScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShiftTensorScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseNot(del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseNot(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseNot_()() {
err := ts.BitwiseNot_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseNotOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseNotOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOr(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOr(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOr_(other *Scalar)() {
err := ts.BitwiseOr_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseOrScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOrScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseOrScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseOrScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseOrScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseOrScalarTensorOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOrTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOrTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOrTensor_(other *Tensor)() {
err := ts.BitwiseOrTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseOrTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOrTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShift(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShift(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShift_(other *Tensor)() {
err := ts.BitwiseRightShift_(other)
if err != nil { log.Fatal(err) }
return
}
func MustBitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseRightShiftScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseRightShiftScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseRightShiftScalarTensorOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShiftTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShiftTensorScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShiftTensorScalar_(other *Scalar)() {
err := ts.BitwiseRightShiftTensorScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShiftTensorScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXor(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXor_(other *Scalar)() {
err := ts.BitwiseXor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseXorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXorScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseXorScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseXorScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBitwiseXorScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseXorScalarTensorOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXorTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXorTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXorTensor_(other *Tensor)() {
err := ts.BitwiseXorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseXorTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXorTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlackmanWindowOut(out *Tensor, windowLength int64)(retVal *Tensor) {
retVal, err := BlackmanWindowOut(out, windowLength)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BlackmanWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlackmanWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor) {
retVal, err := BlackmanWindowPeriodicOut(out, windowLength, periodic)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlockDiag(tensors []*Tensor)(retVal *Tensor) {
retVal, err := BlockDiag(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlockDiagOut(out *Tensor, tensors []*Tensor)(retVal *Tensor) {
retVal, err := BlockDiagOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBmm(mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Bmm(mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBmmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BmmOut(out, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBroadcastTo(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.BroadcastTo(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBucketize(boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
retVal, err := ts.Bucketize(boundaries, outInt32, right, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor) {
retVal, err := BucketizeScalar(selfScalar, boundaries, outInt32, right)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBucketizeScalarOut(out *Tensor, selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor) {
retVal, err := BucketizeScalarOut(out, selfScalar, boundaries, outInt32, right)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
retVal, err := ts.BucketizeTensorOut(out, boundaries, outInt32, right, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCanCast(from gotch.DType, to gotch.DType)(retVal bool) {
retVal, err := CanCast(from, to)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCartesianProd(tensors []*Tensor)(retVal *Tensor) {
retVal, err := CartesianProd(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCat(tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := Cat(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCatOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := CatOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCauchy(median float64, sigma float64, del bool)(retVal *Tensor) {
retVal, err := ts.Cauchy(median, sigma, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCauchy_(median float64, sigma float64)() {
err := ts.Cauchy_(median, sigma)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCauchyOut(out *Tensor, median float64, sigma float64, del bool)(retVal *Tensor) {
retVal, err := ts.CauchyOut(out, median, sigma, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCcolIndices(del bool)(retVal *Tensor) {
retVal, err := ts.CcolIndices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCcolIndicesCopy(del bool)(retVal *Tensor) {
retVal, err := ts.CcolIndicesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCcolIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CcolIndicesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64)(retVal *Tensor) {
retVal, err := Cdist(x1, x2, p, computeMode)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCeil(del bool)(retVal *Tensor) {
retVal, err := ts.Ceil(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCeil_()() {
err := ts.Ceil_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCeilOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CeilOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCelu(del bool)(retVal *Tensor) {
retVal, err := ts.Celu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCelu_()() {
err := ts.Celu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCeluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CeluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustChainMatmul(matrices []*Tensor)(retVal *Tensor) {
retVal, err := ChainMatmul(matrices)
if err != nil { log.Fatal(err) }
return retVal
}
func MustChainMatmulOut(out *Tensor, matrices []*Tensor)(retVal *Tensor) {
retVal, err := ChainMatmulOut(out, matrices)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustChalf(del bool)(retVal *Tensor) {
retVal, err := ts.Chalf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustChannelShuffle(groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.ChannelShuffle(groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustChannelShuffleOut(out *Tensor, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.ChannelShuffleOut(out, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholesky(upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.Cholesky(upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskyInverse(upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskyInverse(upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskyInverseOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskyInverseOut(out, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskyOut(out, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskySolve(input2 *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskySolve(input2, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskySolveOut(out, input2, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ChooseQparamsOptimized(input, numel, nBins, ratio, bitWidth)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustClamp(min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Clamp(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClamp_(min *Scalar, max *Scalar)() {
err := ts.Clamp_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMax(max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMax(max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMax_(max *Scalar)() {
err := ts.ClampMax_(max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMaxOut(out *Tensor, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMaxOut(out, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMaxTensor(max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMaxTensor(max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMaxTensor_(max *Tensor)() {
err := ts.ClampMaxTensor_(max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMaxTensorOut(out *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMaxTensorOut(out, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMin(min *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMin(min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMin_(min *Scalar)() {
err := ts.ClampMin_(min)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMinOut(out *Tensor, min *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMinOut(out, min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMinTensor(min *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMinTensor(min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMinTensor_(min *Tensor)() {
err := ts.ClampMinTensor_(min)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMinTensorOut(out *Tensor, min *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMinTensorOut(out, min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampTensor(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampTensor_(min *Tensor, max *Tensor)() {
err := ts.ClampTensor_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampTensorOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClip(min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Clip(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClip_(min *Scalar, max *Scalar)() {
err := ts.Clip_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClipOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClipOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClipTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClipTensor(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClipTensor_(min *Tensor, max *Tensor)() {
err := ts.ClipTensor_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClipTensorOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClone(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Clone(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCoalesce(del bool)(retVal *Tensor) {
retVal, err := ts.Coalesce(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCol2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustColIndices(del bool)(retVal *Tensor) {
retVal, err := ts.ColIndices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustColIndicesCopy(del bool)(retVal *Tensor) {
retVal, err := ts.ColIndicesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustColIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ColIndicesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustColumnStack(tensors []*Tensor)(retVal *Tensor) {
retVal, err := ColumnStack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustColumnStackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor) {
retVal, err := ColumnStackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCombinations(r int64, withReplacement bool, del bool)(retVal *Tensor) {
retVal, err := ts.Combinations(r, withReplacement, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustComplex(real *Tensor, imag *Tensor)(retVal *Tensor) {
retVal, err := Complex(real, imag)
if err != nil { log.Fatal(err) }
return retVal
}
func MustComplexOut(out *Tensor, real *Tensor, imag *Tensor)(retVal *Tensor) {
retVal, err := ComplexOut(out, real, imag)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConcat(tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := Concat(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConcatOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := ConcatOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConcatenate(tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := Concatenate(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConcatenateOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := ConcatenateOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
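
// exampleMustConcat is an illustrative sketch, not part of the generated API:
// it joins two placeholder tensors along dimension 0 using the package-level
// MustConcat wrapper shown above. Both inputs must agree on every dimension
// except the one being concatenated.
func exampleMustConcat() *Tensor {
	a := MustEmpty([]int64{2, 3}, gotch.Float, gotch.CPU)
	b := MustEmpty([]int64{2, 3}, gotch.Float, gotch.CPU)
	// result has shape [4, 3]
	return MustConcat([]*Tensor{a, b}, 0)
}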
func(ts *Tensor) MustConj(del bool)(retVal *Tensor) {
retVal, err := ts.Conj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConjPhysical(del bool)(retVal *Tensor) {
retVal, err := ts.ConjPhysical(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConjPhysical_()() {
err := ts.ConjPhysical_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ConjPhysicalOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConstantPadNd(pad []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConstantPadNd(pad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConstantPadNdOut(out *Tensor, pad []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConstantPadNdOut(out, pad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustContiguous(del bool)(retVal *Tensor) {
retVal, err := ts.Contiguous(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv1dPadding(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv2dPadding(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv3dPadding(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
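
// exampleMustConv2d is an illustrative sketch, not part of the generated API:
// it shows how the Must* convolution wrappers above are typically called.
// Shapes follow the usual NCHW convention; MustEmpty is used here only to get
// placeholder tensors of the right shape.
func exampleMustConv2d() *Tensor {
	input := MustEmpty([]int64{1, 3, 32, 32}, gotch.Float, gotch.CPU) // N=1, C=3, H=W=32
	weight := MustEmpty([]int64{8, 3, 3, 3}, gotch.Float, gotch.CPU)  // 8 output channels, 3x3 kernel
	bias := MustEmpty([]int64{8}, gotch.Float, gotch.CPU)
	// stride 1, padding 1, dilation 1, groups 1
	return MustConv2d(input, weight, bias, []int64{1, 1}, []int64{1, 1}, []int64{1, 1}, 1)
}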
func(ts *Tensor) MustConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConvDepthwise3d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConvDepthwise3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConvDepthwise3dOut(out, weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConvTbc(weight, bias, pad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.ConvTbcBackward(input, weight, bias, pad, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustConvTbcOut(out *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConvTbcOut(out, weight, bias, pad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvolutionOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
retVal, err := ConvolutionOut(out, input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvolutionOverrideableOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
retVal, err := ConvolutionOverrideableOut(out, input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopy(src *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts.Copy(src, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopyOut(out *Tensor, src *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts.CopyOut(out, src, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopySparseToSparse(src *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts.CopySparseToSparse(src, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopySparseToSparse_(src *Tensor, nonBlocking bool)() {
err := ts.CopySparseToSparse_(src, nonBlocking)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCopySparseToSparseOut(out *Tensor, src *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts.CopySparseToSparseOut(out, src, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopysign(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Copysign(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopysign_(other *Tensor)() {
err := ts.Copysign_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCopysignOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CopysignOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopysignScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.CopysignScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopysignScalar_(other *Scalar)() {
err := ts.CopysignScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCopysignScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.CopysignScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCorrcoef(del bool)(retVal *Tensor) {
retVal, err := ts.Corrcoef(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCos(del bool)(retVal *Tensor) {
retVal, err := ts.Cos(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCos_()() {
err := ts.Cos_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCosOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CosOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCosh(del bool)(retVal *Tensor) {
retVal, err := ts.Cosh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCosh_()() {
err := ts.Cosh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCoshOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CoshOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) {
retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64)(retVal *Tensor) {
retVal, err := CosineSimilarity(x1, x2, dim, eps)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCountNonzero(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CountNonzero(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCountNonzeroDimIntlist(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CountNonzeroDimIntlist(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCountNonzeroDimIntlistOut(out *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CountNonzeroDimIntlistOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCountNonzeroOut(out *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CountNonzeroOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCov(correction int64, fweights *Tensor, aweights *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Cov(correction, fweights, aweights, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCross(other *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Cross(other, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool)(retVal *Tensor) {
retVal, err := ts.CrossEntropyLoss(target, weight, reduction, ignoreIndex, labelSmoothing, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrossOut(out *Tensor, other *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CrossOut(out, other, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrowIndices(del bool)(retVal *Tensor) {
retVal, err := ts.CrowIndices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrowIndicesCopy(del bool)(retVal *Tensor) {
retVal, err := ts.CrowIndicesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrowIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CrowIndicesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := CtcLossTensor(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnAffineGridGeneratorBackwardOut(out *Tensor, grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
retVal, err := CudnnAffineGridGeneratorBackwardOut(out, grad, n, c, h, w)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnAffineGridGeneratorOut(out *Tensor, theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
retVal, err := CudnnAffineGridGeneratorOut(out, theta, n, c, h, w)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := CudnnBatchNorm(input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustCudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := CudnnBatchNormBackward(input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon, reserveSpace)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustCudnnBatchNormBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := CudnnBatchNormBackwardOut(out0, out1, out2, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon, reserveSpace)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustCudnnBatchNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := CudnnBatchNormOut(out0, out1, out2, out3, input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func(ts *Tensor) MustCudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionAddRelu(weight, z, alpha, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionAddReluOut(out *Tensor, weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionAddReluOut(out, weight, z, alpha, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionOut(out *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionOut(out, weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionRelu(weight, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionReluOut(out *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionReluOut(out, weight, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionTransposeOut(out *Tensor, weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionTransposeOut(out, weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnGridSampler(grid, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.CudnnGridSamplerBackward(grid, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCudnnGridSamplerBackwardOut(out0 *Tensor, out1 *Tensor, grid *Tensor, gradOutput *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.CudnnGridSamplerBackwardOut(out0, out1, grid, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCudnnGridSamplerOut(out *Tensor, grid *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnGridSamplerOut(out, grid, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnIsAcceptable(del bool)(retVal bool) {
retVal, err := ts.CudnnIsAcceptable(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCummax(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Cummax(dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCummaxOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.CummaxOut(values, indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustCummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64)(retVal *Tensor) {
retVal, err := CummaxminBackward(grad, input, indices, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCummin(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Cummin(dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCumminOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.CumminOut(values, indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Cumprod(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumprod_(dim int64, dtype gotch.DType)() {
err := ts.Cumprod_(dim, dtype)
if err != nil { log.Fatal(err) }
return
}
func MustCumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor)(retVal *Tensor) {
retVal, err := CumprodBackward(grad, input, dim, output)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.CumprodOut(out, dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Cumsum(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumsum_(dim int64, dtype gotch.DType)() {
err := ts.Cumsum_(dim, dtype)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.CumsumOut(out, dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
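
// exampleMustCumsum is an illustrative sketch, not part of the generated API:
// a cumulative sum along dimension 0, with the accumulation dtype passed
// explicitly as gotch.Float. MustEye (defined later in this file) supplies a
// small, well-defined input.
func exampleMustCumsum() *Tensor {
	x := MustEye(3, gotch.Float, gotch.CPU)
	// running sums down each column; del=true frees x after the call
	return x.MustCumsum(0, gotch.Float, true)
}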
func MustCumulativeTrapezoid(y *Tensor, dim int64)(retVal *Tensor) {
retVal, err := CumulativeTrapezoid(y, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) {
retVal, err := CumulativeTrapezoidX(y, x, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustData(del bool)(retVal *Tensor) {
retVal, err := ts.Data(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDeg2rad(del bool)(retVal *Tensor) {
retVal, err := ts.Deg2rad(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDeg2rad_()() {
err := ts.Deg2rad_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDeg2radOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Deg2radOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDenseDim(del bool)(retVal int64) {
retVal, err := ts.DenseDim(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDequantize(del bool)(retVal *Tensor) {
retVal, err := ts.Dequantize(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDet(del bool)(retVal *Tensor) {
retVal, err := ts.Det(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDetach(del bool)(retVal *Tensor) {
retVal, err := ts.Detach(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDetach_()() {
err := ts.Detach_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDetachCopy(del bool)(retVal *Tensor) {
retVal, err := ts.DetachCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDetachCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DetachCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiag(diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.Diag(diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagEmbed(offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagEmbedOut(out *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagEmbedOut(out, offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagOut(out, diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagflat(offset int64, del bool)(retVal *Tensor) {
retVal, err := ts.Diagflat(offset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Diagonal(offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor) {
retVal, err := DiagonalBackward(gradOutput, inputSizes, offset, dim1, dim2)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDiagonalBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor) {
retVal, err := DiagonalBackwardOut(out, gradOutput, inputSizes, offset, dim1, dim2)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagonalCopy(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagonalCopy(offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagonalCopyOut(out *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagonalCopyOut(out, offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagonalScatter(src *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagonalScatter(src, offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagonalScatterOut(out *Tensor, src *Tensor, offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagonalScatterOut(out, src, offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Diff(n, dim, prepend, append, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DiffOut(out, n, dim, prepend, append, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDigamma(del bool)(retVal *Tensor) {
retVal, err := ts.Digamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDigamma_()() {
err := ts.Digamma_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDigammaOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DigammaOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDist(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Dist(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDistOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DistOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiv(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Div(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiv_(other *Tensor)() {
err := ts.Div_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DivOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivOutMode(out, other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.DivScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivScalar_(other *Scalar)() {
err := ts.DivScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivScalarMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivScalarMode_(other *Scalar, roundingMode string)() {
err := ts.DivScalarMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivScalarModeOut(out *Tensor, other *Scalar, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivScalarModeOut(out, other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.DivScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivTensorMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivTensorMode_(other *Tensor, roundingMode string)() {
err := ts.DivTensorMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivide(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Divide(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivide_(other *Tensor)() {
err := ts.Divide_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DivideOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivideOutMode(out, other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.DivideScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideScalar_(other *Scalar)() {
err := ts.DivideScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivideScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivideScalarMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideScalarMode_(other *Scalar, roundingMode string)() {
err := ts.DivideScalarMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivideTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivideTensorMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideTensorMode_(other *Tensor, roundingMode string)() {
err := ts.DivideTensorMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
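
// exampleMustDivRounding is an illustrative sketch, not part of the generated API:
// it contrasts true element-wise division with the *Mode variants above, whose
// roundingMode string takes the usual libtorch values "trunc" or "floor".
// del=false keeps the inputs alive so they can be reused for the second call.
func exampleMustDivRounding() (*Tensor, *Tensor) {
	a := MustEmpty([]int64{4}, gotch.Float, gotch.CPU)
	b := MustEmpty([]int64{4}, gotch.Float, gotch.CPU)
	quotient := a.MustDiv(b, false)                   // true division
	floored := a.MustDivTensorMode(b, "floor", false) // round the result toward -inf
	return quotient, floored
}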
func(ts *Tensor) MustDot(tensor *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Dot(tensor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDotOut(out *Tensor, tensor *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DotOut(out, tensor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := Dropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDropout_(p float64, train bool)() {
err := ts.Dropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func MustDstack(tensors []*Tensor)(retVal *Tensor) {
retVal, err := Dstack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor) {
retVal, err := DstackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEinsum(equation string, tensors []*Tensor, path []int64)(retVal *Tensor) {
retVal, err := Einsum(equation, tensors, path)
if err != nil { log.Fatal(err) }
return retVal
}
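
// exampleMustEinsum is an illustrative sketch, not part of the generated API:
// a batched matrix multiplication expressed through the einsum equation string.
// Passing nil for path is assumed to request the default contraction order, in
// line with the other optional []int64 arguments in this file.
func exampleMustEinsum() *Tensor {
	a := MustEmpty([]int64{2, 3, 4}, gotch.Float, gotch.CPU)
	b := MustEmpty([]int64{2, 4, 5}, gotch.Float, gotch.CPU)
	// "bij,bjk->bik" contracts over j for each batch b; result shape [2, 3, 5]
	return MustEinsum("bij,bjk->bik", []*Tensor{a, b}, nil)
}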
func(ts *Tensor) MustElu(del bool)(retVal *Tensor) {
retVal, err := ts.Elu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustElu_()() {
err := ts.Elu_()
if err != nil { log.Fatal(err) }
return
}
func MustEluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor) {
retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, isResult, selfOrResult)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor) {
retVal, err := EluBackwardGradInput(gradInput, gradOutput, alpha, scale, inputScale, isResult, selfOrResult)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.EluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) {
retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) {
retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustEmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := EmbeddingBagPaddingIdx(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustEmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) {
retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbeddingDenseBackwardOut(out *Tensor, gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) {
retVal, err := EmbeddingDenseBackwardOut(out, gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbeddingOut(out *Tensor, weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) {
retVal, err := EmbeddingOut(out, weight, indices, paddingIdx, scaleGradByFreq, sparse)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEmbeddingRenorm(indices *Tensor, maxNorm float64, normType float64, del bool)(retVal *Tensor) {
retVal, err := ts.EmbeddingRenorm(indices, maxNorm, normType, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64)() {
err := ts.EmbeddingRenorm_(indices, maxNorm, normType)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustEmbeddingRenormOut(out *Tensor, indices *Tensor, maxNorm float64, normType float64, del bool)(retVal *Tensor) {
retVal, err := ts.EmbeddingRenormOut(out, indices, maxNorm, normType, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) {
retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Empty(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEmptyLike(del bool)(retVal *Tensor) {
retVal, err := ts.EmptyLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEmptyLikeOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.EmptyLikeOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := EmptyOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyPermuted(size []int64, physicalLayout []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := EmptyPermuted(size, physicalLayout, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyPermutedOut(out *Tensor, size []int64, physicalLayout []int64)(retVal *Tensor) {
retVal, err := EmptyPermutedOut(out, size, physicalLayout)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := EmptyQuantized(size, qtensor, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyQuantizedOut(out *Tensor, size []int64, qtensor *Tensor)(retVal *Tensor) {
retVal, err := EmptyQuantizedOut(out, size, qtensor)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyStridedOut(out *Tensor, size []int64, stride []int64)(retVal *Tensor) {
retVal, err := EmptyStridedOut(out, size, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEq(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Eq(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEq_(other *Scalar)() {
err := ts.Eq_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustEqScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.EqScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEqTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.EqTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEqTensor_(other *Tensor)() {
err := ts.EqTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustEqTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.EqTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEqual(other *Tensor, del bool)(retVal bool) {
retVal, err := ts.Equal(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErf(del bool)(retVal *Tensor) {
retVal, err := ts.Erf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErf_()() {
err := ts.Erf_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustErfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ErfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfc(del bool)(retVal *Tensor) {
retVal, err := ts.Erfc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfc_()() {
err := ts.Erfc_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustErfcOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ErfcOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfinv(del bool)(retVal *Tensor) {
retVal, err := ts.Erfinv(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfinv_()() {
err := ts.Erfinv_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustErfinvOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ErfinvOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp(del bool)(retVal *Tensor) {
retVal, err := ts.Exp(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp2(del bool)(retVal *Tensor) {
retVal, err := ts.Exp2(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp2_()() {
err := ts.Exp2_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustExp2Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Exp2Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp_()() {
err := ts.Exp_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustExpOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ExpOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpand(size []int64, implicit bool, del bool)(retVal *Tensor) {
retVal, err := ts.Expand(size, implicit, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpandAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ExpandAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpandCopy(size []int64, implicit bool, del bool)(retVal *Tensor) {
retVal, err := ts.ExpandCopy(size, implicit, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpandCopyOut(out *Tensor, size []int64, implicit bool, del bool)(retVal *Tensor) {
retVal, err := ts.ExpandCopyOut(out, size, implicit, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpm1(del bool)(retVal *Tensor) {
retVal, err := ts.Expm1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpm1_()() {
err := ts.Expm1_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustExpm1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Expm1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExponential(lambd float64, del bool)(retVal *Tensor) {
retVal, err := ts.Exponential(lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExponential_(lambd float64)() {
err := ts.Exponential_(lambd)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustExponentialOut(out *Tensor, lambd float64, del bool)(retVal *Tensor) {
retVal, err := ts.ExponentialOut(out, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Eye(n, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := EyeM(n, m, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEyeMOut(out *Tensor, n int64, m int64)(retVal *Tensor) {
retVal, err := EyeMOut(out, n, m)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEyeOut(out *Tensor, n int64)(retVal *Tensor) {
retVal, err := EyeOut(out, n)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) {
retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FakeQuantizePerChannelAffineCachemask(scale, zeroPoint, axis, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustFakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := FakeQuantizePerChannelAffineCachemaskBackward(grad, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerChannelAffineCachemaskOut(out0 *Tensor, out1 *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FakeQuantizePerChannelAffineCachemaskOut(out0, out1, scale, zeroPoint, axis, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) {
retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FakeQuantizePerTensorAffineCachemask(scale, zeroPoint, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustFakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := FakeQuantizePerTensorAffineCachemaskBackward(grad, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerTensorAffineCachemaskOut(out0 *Tensor, out1 *Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FakeQuantizePerTensorAffineCachemaskOut(out0, out1, scale, zeroPoint, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool)(retVal *Tensor) {
retVal, err := ts.FakeQuantizePerTensorAffineTensorQparams(scale, zeroPoint, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmPackGemmMatrixFp16(input *Tensor)(retVal *Tensor) {
retVal, err := FbgemmPackGemmMatrixFp16(input)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmPackQuantizedMatrix(input *Tensor)(retVal *Tensor) {
retVal, err := FbgemmPackQuantizedMatrix(input)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64)(retVal *Tensor) {
retVal, err := FbgemmPackQuantizedMatrixKn(input, k, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFeatureAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := FeatureAlphaDropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFeatureAlphaDropout_(p float64, train bool)() {
err := ts.FeatureAlphaDropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func MustFeatureDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := FeatureDropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFeatureDropout_(p float64, train bool)() {
err := ts.FeatureDropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFftFft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := FftFftfreq(n, d, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFftFftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor) {
retVal, err := FftFftfreqOut(out, n, d)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftshift(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftshift(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftshift(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftshift(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
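// Usage sketch (illustrative, not generated): MustFftRfftfreq below builds the
// sample-frequency tensor for a real FFT of length n with sample spacing d,
// terminating via log.Fatal if the underlying FftRfftfreq call fails. The
// gotch.Float and gotch.CPU option values are assumed from the gotch package
// and are not defined in this file.
//
//	freqs := MustFftRfftfreq(8, 1.0, gotch.Float, gotch.CPU)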
func MustFftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := FftRfftfreq(n, d, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFftRfftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor) {
retVal, err := FftRfftfreqOut(out, n, d)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
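// Usage sketch: wrappers whose names end in an underscore are the in-place
// variants; they mutate the receiver and return nothing, while the plain form
// returns a new tensor. Here x is an existing *Tensor and value a *Scalar built
// elsewhere (both assumed, not constructed in this file).
//
//	y := x.MustFill(value, false) // new tensor filled with value, x kept
//	x.MustFill_(value)            // fill x in place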
func(ts *Tensor) MustFill(value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Fill(value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFill_(value *Scalar)() {
err := ts.Fill_(value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFillDiagonal_(fillValue *Scalar, wrap bool)() {
err := ts.FillDiagonal_(fillValue, wrap)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFillScalarOut(out *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FillScalarOut(out, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFillTensor(value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FillTensor(value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFillTensor_(value *Tensor)() {
err := ts.FillTensor_(value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFillTensorOut(out *Tensor, value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FillTensorOut(out, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFix(del bool)(retVal *Tensor) {
retVal, err := ts.Fix(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFix_()() {
err := ts.Fix_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFixOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FixOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
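// Usage sketch for the method-style wrappers such as MustFlatten below: the
// receiver x is any previously constructed *Tensor, and the trailing del flag
// is assumed (as elsewhere in this generated API) to release the receiver's
// underlying storage after the call when set to true.
//
//	flat := x.MustFlatten(0, -1, false) // flatten every dimension, keep x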
func(ts *Tensor) MustFlatten(startDim int64, endDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Flatten(startDim, endDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFlattenDenseTensors(tensors []*Tensor)(retVal *Tensor) {
retVal, err := FlattenDenseTensors(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFlip(dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Flip(dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFlipOut(out *Tensor, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.FlipOut(out, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFliplr(del bool)(retVal *Tensor) {
retVal, err := ts.Fliplr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFlipud(del bool)(retVal *Tensor) {
retVal, err := ts.Flipud(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPower(exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPower(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPower_(exponent *Scalar)() {
err := ts.FloatPower_(exponent)
if err != nil { log.Fatal(err) }
return
}
func MustFloatPowerScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := FloatPowerScalar(selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := FloatPowerScalarOut(out, selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPowerTensor_(exponent *Tensor)() {
err := ts.FloatPowerTensor_(exponent)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloatPowerTensorScalar(exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPowerTensorScalar(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPowerTensorScalarOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPowerTensorTensorOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloor(del bool)(retVal *Tensor) {
retVal, err := ts.Floor(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloor_()() {
err := ts.Floor_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloorDivide(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloorDivide(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloorDivide_(other *Tensor)() {
err := ts.FloorDivide_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloorDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloorDivideOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloorDivideScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FloorDivideScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloorDivideScalar_(other *Scalar)() {
err := ts.FloorDivideScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloorOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloorOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmax(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Fmax(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FmaxOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmin(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Fmin(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFminOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FminOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmod(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Fmod(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmod_(other *Scalar)() {
err := ts.Fmod_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFmodScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FmodScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmodTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FmodTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmodTensor_(other *Tensor)() {
err := ts.FmodTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFmodTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FmodTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFrac(del bool)(retVal *Tensor) {
retVal, err := ts.Frac(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFrac_()() {
err := ts.Frac_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFracOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FracOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool2d(kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool2dOutput(output, indices, kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool3d(kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool3dOutput(output, indices, kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFrexp(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Frexp(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FrexpTensorOut(mantissa, exponent, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFrobeniusNorm(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.FrobeniusNorm(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFromFileOut(out *Tensor, filename string, shared bool, size []int64)(retVal *Tensor) {
retVal, err := FromFileOut(out, filename, shared, size)
if err != nil { log.Fatal(err) }
return retVal
}
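// Usage sketch (illustrative, not generated): MustFull below creates a tensor
// of the given size filled with fillValue, calling log.Fatal on any error. The
// FloatScalar constructor and the gotch.Float / gotch.CPU option values are
// assumed from the wider gotch API and are not defined in this file.
//
//	fill := FloatScalar(3.14)
//	x := MustFull([]int64{2, 3}, fill, gotch.Float, gotch.CPU)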
func MustFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Full(size, fillValue, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFullLike(fillValue *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FullLike(fillValue, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFullLikeOut(out *Tensor, fillValue *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FullLikeOut(out, fillValue, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFullOut(out *Tensor, size []int64, fillValue *Scalar)(retVal *Tensor) {
retVal, err := FullOut(out, size, fillValue)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal *Tensor) {
retVal, err := ts.FusedMovingAvgObsFakeQuant(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGather(dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts.Gather(dim, index, sparseGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts.GatherBackward(grad, dim, index, sparseGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGcd(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Gcd(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGcd_(other *Tensor)() {
err := ts.Gcd_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGcdOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GcdOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGe(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Ge(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGe_(other *Scalar)() {
err := ts.Ge_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GeScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GeTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeTensor_(other *Tensor)() {
err := ts.GeTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GeTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
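// Usage sketch: MustGelu below takes the approximation mode as a string; the
// underlying ATen operator is assumed to accept "none" and "tanh", and this
// wrapper does not validate the value itself.
//
//	y := x.MustGelu("none", false)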
func(ts *Tensor) MustGelu(approximate string, del bool)(retVal *Tensor) {
retVal, err := ts.Gelu(approximate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGelu_(approximate string)() {
err := ts.Gelu_(approximate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGeluBackward(gradOutput *Tensor, approximate string, del bool)(retVal *Tensor) {
retVal, err := ts.GeluBackward(gradOutput, approximate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, approximate string, del bool)(retVal *Tensor) {
retVal, err := ts.GeluBackwardGradInput(gradInput, gradOutput, approximate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeluOut(out *Tensor, approximate string, del bool)(retVal *Tensor) {
retVal, err := ts.GeluOut(out, approximate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeometric(p float64, del bool)(retVal *Tensor) {
retVal, err := ts.Geometric(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeometric_(p float64)() {
err := ts.Geometric_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGeometricOut(out *Tensor, p float64, del bool)(retVal *Tensor) {
retVal, err := ts.GeometricOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeqrf(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Geqrf(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustGeqrfA(a *Tensor, tau *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.GeqrfA(a, tau, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustGer(vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Ger(vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGerOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GerOut(out, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGlu(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Glu(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGluBackward(gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.GluBackward(gradOutput, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.GluBackwardGradInput(gradInput, gradOutput, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGluBackwardJvp(gradX *Tensor, gradGlu *Tensor, x *Tensor, dgradGlu *Tensor, dx *Tensor, dim int64)(retVal *Tensor) {
retVal, err := GluBackwardJvp(gradX, gradGlu, x, dgradGlu, dx, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGluBackwardJvpOut(out *Tensor, gradX *Tensor, gradGlu *Tensor, x *Tensor, dgradGlu *Tensor, dx *Tensor, dim int64)(retVal *Tensor) {
retVal, err := GluBackwardJvpOut(out, gradX, gradGlu, x, dgradGlu, dx, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGluJvp(glu *Tensor, x *Tensor, dx *Tensor, dim int64)(retVal *Tensor) {
retVal, err := GluJvp(glu, x, dx, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGluJvpOut(out *Tensor, glu *Tensor, x *Tensor, dx *Tensor, dim int64)(retVal *Tensor) {
retVal, err := GluJvpOut(out, glu, x, dx, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGluOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.GluOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGrad(del bool)(retVal *Tensor) {
retVal, err := ts.Grad(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreater(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Greater(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreater_(other *Scalar)() {
err := ts.Greater_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterEqual(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqual(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterEqual_(other *Scalar)() {
err := ts.GreaterEqual_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqualScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterEqualTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqualTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterEqualTensor_(other *Tensor)() {
err := ts.GreaterEqualTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqualTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterTensor_(other *Tensor)() {
err := ts.GreaterTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler2dOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler2dOut(out, input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler3dOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler3dOut(out, input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGru(input *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := Gru(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := GruData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustGt(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Gt(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGt_(other *Scalar)() {
err := ts.Gt_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GtScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGtTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GtTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGtTensor_(other *Tensor)() {
err := ts.GtTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GtTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowOut(out *Tensor, windowLength int64)(retVal *Tensor) {
retVal, err := HammingWindowOut(out, windowLength)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindowPeriodicAlpha(windowLength, periodic, alpha, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindowPeriodicAlphaBeta(windowLength, periodic, alpha, beta, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodicAlphaBetaOut(out *Tensor, windowLength int64, periodic bool, alpha float64, beta float64)(retVal *Tensor) {
retVal, err := HammingWindowPeriodicAlphaBetaOut(out, windowLength, periodic, alpha, beta)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodicAlphaOut(out *Tensor, windowLength int64, periodic bool, alpha float64)(retVal *Tensor) {
retVal, err := HammingWindowPeriodicAlphaOut(out, windowLength, periodic, alpha)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor) {
retVal, err := HammingWindowPeriodicOut(out, windowLength, periodic)
if err != nil { log.Fatal(err) }
return retVal
}
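// Usage sketch (illustrative, not generated): the window factories below follow
// the same options pattern as the other package-level constructors; gotch.Float
// and gotch.CPU are assumed option values from the gotch package.
//
//	w := MustHannWindow(256, gotch.Float, gotch.CPU)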
func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HannWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHannWindowOut(out *Tensor, windowLength int64)(retVal *Tensor) {
retVal, err := HannWindowOut(out, windowLength)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HannWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHannWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor) {
retVal, err := HannWindowPeriodicOut(out, windowLength, periodic)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrink(del bool)(retVal *Tensor) {
retVal, err := ts.Hardshrink(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardshrinkBackward(gradOut, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardshrinkBackwardGradInput(gradInput, gradOut, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrinkOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardshrinkOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoid(del bool)(retVal *Tensor) {
retVal, err := ts.Hardsigmoid(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoid_()() {
err := ts.Hardsigmoid_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHardsigmoidBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardsigmoidBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardsigmoidBackwardGradInput(gradInput, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardsigmoidOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardswish(del bool)(retVal *Tensor) {
retVal, err := ts.Hardswish(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardswish_()() {
err := ts.Hardswish_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHardswishBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardswishBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardswishBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardswishBackwardOut(out, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardswishOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardswishOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanh(del bool)(retVal *Tensor) {
retVal, err := ts.Hardtanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanh_()() {
err := ts.Hardtanh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardtanhBackwardGradInput(gradInput, gradOutput, minVal, maxVal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardtanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHeaviside(values *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Heaviside(values, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHeaviside_(values *Tensor)() {
err := ts.Heaviside_(values)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHeavisideOut(out *Tensor, values *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HeavisideOut(out, values, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHistc(bins int64, del bool)(retVal *Tensor) {
retVal, err := ts.Histc(bins, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor) {
retVal, err := ts.HistcOut(out, bins, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) {
retVal, err := Hspmm(mat1, mat2)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) {
retVal, err := HspmmOut(out, mat1, mat2)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHstack(tensors []*Tensor)(retVal *Tensor) {
retVal, err := Hstack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor) {
retVal, err := HstackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLoss(target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLoss(target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLossBackward(gradOutput, target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLossBackwardOut(gradInput, gradOutput, target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLossOut(out, target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHypot(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Hypot(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHypot_(other *Tensor)() {
err := ts.Hypot_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHypotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HypotOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustI0(del bool)(retVal *Tensor) {
retVal, err := ts.I0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustI0_()() {
err := ts.I0_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustI0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.I0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgamma(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Igamma(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgamma_(other *Tensor)() {
err := ts.Igamma_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIgammaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IgammaOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgammac(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Igammac(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgammac_(other *Tensor)() {
err := ts.Igammac_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIgammacOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IgammacOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIm2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustImag(del bool)(retVal *Tensor) {
retVal, err := ts.Imag(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexAdd(dim, index, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexAdd_(dim int64, index *Tensor, source *Tensor)() {
err := ts.IndexAdd_(dim, index, source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexAddOut(out *Tensor, dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexAddOut(out, dim, index, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexCopy(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexCopy(dim, index, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexCopy_(dim int64, index *Tensor, source *Tensor)() {
err := ts.IndexCopy_(dim, index, source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexCopyOut(out *Tensor, dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexCopyOut(out, dim, index, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexFill(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.IndexFill(dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexFill_(dim int64, index *Tensor, value *Scalar)() {
err := ts.IndexFill_(dim, index, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexFillIntScalarOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.IndexFillIntScalarOut(out, dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexFillIntTensor(dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexFillIntTensor_(dim int64, index *Tensor, value *Tensor)() {
err := ts.IndexFillIntTensor_(dim, index, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexFillIntTensorOut(out *Tensor, dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexFillIntTensorOut(out, dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexPutOut(out *Tensor, indices []*Tensor, values *Tensor, accumulate bool, del bool)(retVal *Tensor) {
retVal, err := ts.IndexPutOut(out, indices, values, accumulate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexReduce(dim int64, index *Tensor, source *Tensor, reduce string, includeSelf bool, del bool)(retVal *Tensor) {
retVal, err := ts.IndexReduce(dim, index, source, reduce, includeSelf, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexReduce_(dim int64, index *Tensor, source *Tensor, reduce string, includeSelf bool)() {
err := ts.IndexReduce_(dim, index, source, reduce, includeSelf)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexReduceOut(out *Tensor, dim int64, index *Tensor, source *Tensor, reduce string, includeSelf bool, del bool)(retVal *Tensor) {
retVal, err := ts.IndexReduceOut(out, dim, index, source, reduce, includeSelf, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexSelect(dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor)(retVal *Tensor) {
retVal, err := IndexSelectBackward(grad, selfSizes, dim, index)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexSelectOut(out, dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexTensorOut(out *Tensor, indices []*Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexTensorOut(out, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndices(del bool)(retVal *Tensor) {
retVal, err := ts.Indices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndicesCopy(del bool)(retVal *Tensor) {
retVal, err := ts.IndicesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndicesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInfinitelyDifferentiableGeluBackward(grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.InfinitelyDifferentiableGeluBackward(grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInner(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Inner(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInnerOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.InnerOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustInstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIntRepr(del bool)(retVal *Tensor) {
retVal, err := ts.IntRepr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIntReprOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IntReprOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInverse(del bool)(retVal *Tensor) {
retVal, err := ts.Inverse(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInverseOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.InverseOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsCoalesced(del bool)(retVal bool) {
retVal, err := ts.IsCoalesced(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsComplex(del bool)(retVal bool) {
retVal, err := ts.IsComplex(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsConj(del bool)(retVal bool) {
retVal, err := ts.IsConj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsDistributed(del bool)(retVal bool) {
retVal, err := ts.IsDistributed(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsFloatingPoint(del bool)(retVal bool) {
retVal, err := ts.IsFloatingPoint(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsInference(del bool)(retVal bool) {
retVal, err := ts.IsInference(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsLeaf(del bool)(retVal bool) {
retVal, err := ts.IsLeaf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsNeg(del bool)(retVal bool) {
retVal, err := ts.IsNeg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsNonzero(del bool)(retVal bool) {
retVal, err := ts.IsNonzero(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsPinned(device gotch.Device, del bool)(retVal bool) {
retVal, err := ts.IsPinned(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsSameSize(other *Tensor, del bool)(retVal bool) {
retVal, err := ts.IsSameSize(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsSetTo(tensor *Tensor, del bool)(retVal bool) {
retVal, err := ts.IsSetTo(tensor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsSigned(del bool)(retVal bool) {
retVal, err := ts.IsSigned(del)
if err != nil { log.Fatal(err) }
return retVal
}
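// Usage sketch: the boolean Must* helpers return plain Go values rather than
// tensors, so they can be used directly in control flow; any underlying error
// still terminates the program via log.Fatal.
//
//	if MustIsVulkanAvailable() {
//		// a Vulkan-backed device is presumably usable here
//	}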
func MustIsVulkanAvailable()(retVal bool) {
retVal, err := IsVulkanAvailable()
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal *Tensor) {
retVal, err := ts.Isclose(other, rtol, atol, equalNan, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsfinite(del bool)(retVal *Tensor) {
retVal, err := ts.Isfinite(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := Isin(elements, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinScalarTensor(element, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinScalarTensorOut(out, element, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinTensorScalar(elements, testElement, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinTensorScalarOut(out, elements, testElement, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinTensorTensorOut(out, elements, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsinf(del bool)(retVal *Tensor) {
retVal, err := ts.Isinf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsinfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IsinfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsnan(del bool)(retVal *Tensor) {
retVal, err := ts.Isnan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsnanOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IsnanOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsneginf(del bool)(retVal *Tensor) {
retVal, err := ts.Isneginf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsneginfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IsneginfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsposinf(del bool)(retVal *Tensor) {
retVal, err := ts.Isposinf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsposinfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IsposinfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsreal(del bool)(retVal *Tensor) {
retVal, err := ts.Isreal(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIstft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool)(retVal *Tensor) {
retVal, err := ts.Istft(nFft, hopLength, winLength, window, center, normalized, onesided, length, returnComplex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := KaiserWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := KaiserWindowBeta(windowLength, periodic, beta, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindowBetaOut(out *Tensor, windowLength int64, periodic bool, beta float64)(retVal *Tensor) {
retVal, err := KaiserWindowBetaOut(out, windowLength, periodic, beta)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindowOut(out *Tensor, windowLength int64)(retVal *Tensor) {
retVal, err := KaiserWindowOut(out, windowLength)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := KaiserWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVal *Tensor) {
retVal, err := KaiserWindowPeriodicOut(out, windowLength, periodic)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustKlDiv(target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor) {
retVal, err := ts.KlDiv(target, reduction, logTarget, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustKron(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Kron(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustKronOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.KronOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
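// Usage sketch (illustrative, not generated): wrappers that mirror multi-output
// ATen ops return two tensors. Here x is an assumed existing *Tensor; the call
// yields the 2nd-smallest values along dim 0 (keepdim=true) together with their
// indices, keeping x alive (del=false).
//
//	vals, idxs := x.MustKthvalue(2, 0, true, false)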
func(ts *Tensor) MustKthvalue(k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Kthvalue(k, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustKthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.KthvalueValues(values, indices, k, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustL1Loss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.L1Loss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool)(retVal *Tensor) {
retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLcm(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Lcm(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLcm_(other *Tensor)() {
err := ts.Lcm_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLcmOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LcmOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLdexp(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Ldexp(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLdexp_(other *Tensor)() {
err := ts.Ldexp_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLdexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LdexpOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLe(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Le(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLe_(other *Scalar)() {
err := ts.Le_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LeScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LeTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeTensor_(other *Tensor)() {
err := ts.LeTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LeTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyRelu(del bool)(retVal *Tensor) {
retVal, err := ts.LeakyRelu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyRelu_()() {
err := ts.LeakyRelu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor) {
retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor) {
retVal, err := ts.LeakyReluBackwardGradInput(gradInput, gradOutput, negativeSlope, selfIsResult, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyReluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LeakyReluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerp(end *Tensor, weight *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Lerp(end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerp_(end *Tensor, weight *Scalar)() {
err := ts.Lerp_(end, weight)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LerpScalarOut(out, end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerpTensor(end *Tensor, weight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LerpTensor(end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerpTensor_(end *Tensor, weight *Tensor)() {
err := ts.LerpTensor_(end, weight)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LerpTensorOut(out, end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLess(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Less(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLess_(other *Scalar)() {
err := ts.Less_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessEqual(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqual(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessEqual_(other *Scalar)() {
err := ts.LessEqual_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqualScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessEqualTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqualTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessEqualTensor_(other *Tensor)() {
err := ts.LessEqualTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqualTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LessScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessTensor_(other *Tensor)() {
err := ts.LessTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLgamma(del bool)(retVal *Tensor) {
retVal, err := ts.Lgamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLgamma_()() {
err := ts.Lgamma_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLgammaOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LgammaOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLift(del bool)(retVal *Tensor) {
retVal, err := ts.Lift(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLiftFresh(del bool)(retVal *Tensor) {
retVal, err := ts.LiftFresh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLiftFreshCopy(del bool)(retVal *Tensor) {
retVal, err := ts.LiftFreshCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLiftFreshCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LiftFreshCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLiftOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LiftOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
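// Hand-added sketch (not generated): MustLinalgCholesky factors a symmetric
// positive-definite matrix, returning the lower factor when upper is false
// (torch.linalg.cholesky semantics). MustOfSlice and MustReshape are assumed
// helpers; the 2x2 matrix is illustrative.
//
//    a := MustOfSlice([]float64{4, 2, 2, 3}).MustReshape([]int64{2, 2}, true)
//    l := a.MustLinalgCholesky(false, true) // lower-triangular L with L*L^T = a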
func(ts *Tensor) MustLinalgCholesky(upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCholesky(upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCholeskyEx(upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgCholeskyEx(upper, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgCholeskyExL(l, info, upper, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCholeskyOut(out, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCond(p *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCond(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCondOut(out *Tensor, p *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCondOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCondPStr(p string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCondPStr(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCondPStrOut(out *Tensor, p string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCondPStrOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCross(other *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCross(other, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCrossOut(out *Tensor, other *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCrossOut(out, other, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgDet(a *Tensor)(retVal *Tensor) {
retVal, err := LinalgDet(a)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgDetOut(out *Tensor, a *Tensor)(retVal *Tensor) {
retVal, err := LinalgDetOut(out, a)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgDiagonal(a *Tensor, offset int64, dim1 int64, dim2 int64)(retVal *Tensor) {
retVal, err := LinalgDiagonal(a, offset, dim1, dim2)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEig(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEig(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEigOut(eigenvalues, eigenvectors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEigh(uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEigh(uPLO, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEighEigvals(eigvals, eigvecs, uPLO, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEigvals(del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvals(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEigvalsOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvalsOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEigvalsh(uPLO string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvalsh(uPLO, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEigvalshOut(out *Tensor, uPLO string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvalshOut(out, uPLO, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgHouseholderProduct(input *Tensor, tau *Tensor)(retVal *Tensor) {
retVal, err := LinalgHouseholderProduct(input, tau)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor)(retVal *Tensor) {
retVal, err := LinalgHouseholderProductOut(out, input, tau)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgInv(a *Tensor)(retVal *Tensor) {
retVal, err := LinalgInv(a)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgInvEx(a *Tensor, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgInvEx(a, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgInvExInverse(inverse *Tensor, info *Tensor, a *Tensor, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgInvExInverse(inverse, info, a, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgInvOut(out *Tensor, a *Tensor)(retVal *Tensor) {
retVal, err := LinalgInvOut(out, a)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgLdlFactor(hermitian bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgLdlFactor(hermitian, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgLdlFactorEx(hermitian bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.LinalgLdlFactorEx(hermitian, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustLinalgLdlFactorExOut(lD *Tensor, pivots *Tensor, info *Tensor, hermitian bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.LinalgLdlFactorExOut(lD, pivots, info, hermitian, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustLinalgLdlFactorOut(lD *Tensor, pivots *Tensor, hermitian bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgLdlFactorOut(lD, pivots, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgLdlSolve(lD *Tensor, pivots *Tensor, b *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgLdlSolve(lD, pivots, b, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgLdlSolveOut(out *Tensor, lD *Tensor, pivots *Tensor, b *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgLdlSolveOut(out, lD, pivots, b, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgLstsq(b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsq(b, rcond, driver, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func(ts *Tensor) MustLinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsqOut(solution, residuals, rank, singularValues, b, rcond, driver, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustLinalgLu(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LinalgLu(a, pivot)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLinalgLuFactor(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgLuFactor(a, pivot)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgLuFactorEx(a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LinalgLuFactorEx(a, pivot, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLinalgLuFactorExOut(lU *Tensor, pivots *Tensor, info *Tensor, a *Tensor, pivot bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LinalgLuFactorExOut(lU, pivots, info, a, pivot, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLinalgLuFactorOut(lU *Tensor, pivots *Tensor, a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgLuFactorOut(lU, pivots, a, pivot)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgLuOut(p *Tensor, l *Tensor, u *Tensor, a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LinalgLuOut(p, l, u, a, pivot)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
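// Hand-added sketch (not generated): the LU wrappers compose in the usual
// torch.linalg way, factoring once and reusing the factors for solves. The
// tensors a and b below are illustrative placeholders built elsewhere.
//
//    lu, pivots := MustLinalgLuFactor(a, true)
//    x := MustLinalgLuSolve(lu, pivots, b, true, false) // solves a·x = b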
func MustLinalgLuSolve(lU *Tensor, pivots *Tensor, b *Tensor, left bool, adjoint bool)(retVal *Tensor) {
retVal, err := LinalgLuSolve(lU, pivots, b, left, adjoint)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgLuSolveOut(out *Tensor, lU *Tensor, pivots *Tensor, b *Tensor, left bool, adjoint bool)(retVal *Tensor) {
retVal, err := LinalgLuSolveOut(out, lU, pivots, b, left, adjoint)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatmulOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixExp(del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixExp(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixExpOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixExpOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixPower(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixPower(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixPowerOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixRank(tol float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixRank(tol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixRankAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixRankAtolRtolFloat(atol, rtol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixRankAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixRankAtolRtolFloatOut(out, atol, rtol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMatrixRankAtolRtolTensor(input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgMatrixRankAtolRtolTensor(input, atol, rtol, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMatrixRankAtolRtolTensorOut(out *Tensor, input *Tensor, atol *Tensor, rtol *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgMatrixRankAtolRtolTensorOut(out, input, atol, rtol, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixRankOut(out *Tensor, tol float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixRankOut(out, tol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgMatrixRankOutTolTensor(out, input, tol, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgMatrixRankTolTensor(input, tol, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMultiDot(tensors []*Tensor)(retVal *Tensor) {
retVal, err := LinalgMultiDot(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMultiDotOut(out *Tensor, tensors []*Tensor)(retVal *Tensor) {
retVal, err := LinalgMultiDotOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNorm(ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNormOrdStr(ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNormOrdStrOut(out, ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNormOut(out, ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinv(rcond float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinv(rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvAtolRtolFloat(atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvAtolRtolFloat(atol, rtol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvAtolRtolFloatOut(out *Tensor, atol []float64, rtol []float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvAtolRtolFloatOut(out, atol, rtol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvAtolRtolTensor(atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvAtolRtolTensor(atol, rtol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvAtolRtolTensorOut(out *Tensor, atol *Tensor, rtol *Tensor, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvAtolRtolTensorOut(out, atol, rtol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvOut(out, rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvOutRcondTensor(out, rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvRcondTensor(rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgQr(a *Tensor, mode string)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgQr(a, mode)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgQrOut(q *Tensor, r *Tensor, a *Tensor, mode string)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgQrOut(q, r, a, mode)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgSlogdet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgSlogdet(a)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, a *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgSlogdetOut(sign, logabsdet, a)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
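// Hand-added sketch (not generated): MustLinalgSolve solves a·x = b when left
// is true (and x·a = b when false), mirroring torch.linalg.solve. a and b are
// illustrative placeholders built elsewhere.
//
//    x := MustLinalgSolve(a, b, true)
//    // sanity check: a.MustMatmul(x, false) should be close to b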
func MustLinalgSolve(a *Tensor, b *Tensor, left bool)(retVal *Tensor) {
retVal, err := LinalgSolve(a, b, left)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgSolveEx(a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgSolveEx(a, b, left, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgSolveExOut(result *Tensor, info *Tensor, a *Tensor, b *Tensor, left bool, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LinalgSolveExOut(result, info, a, b, left, checkErrors)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgSolveOut(out *Tensor, a *Tensor, b *Tensor, left bool)(retVal *Tensor) {
retVal, err := LinalgSolveOut(out, a, b, left)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgSolveTriangular(b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgSolveTriangular(b, upper, left, unitriangular, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgSolveTriangularOut(out *Tensor, b *Tensor, upper bool, left bool, unitriangular bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgSolveTriangularOut(out, b, upper, left, unitriangular, del)
if err != nil { log.Fatal(err) }
return retVal
}
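// Hand-added sketch (not generated): MustLinalgSvd returns (U, S, Vh) with
// a ≈ U·diag(S)·Vh, as in torch.linalg.svd. An empty driver string is assumed
// here to select the default backend; a is an illustrative placeholder.
//
//    u, s, vh := MustLinalgSvd(a, false, "")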
func MustLinalgSvd(a *Tensor, fullMatrices bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LinalgSvd(a, fullMatrices, driver)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, driver string)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LinalgSvdU(u, s, vh, a, fullMatrices, driver)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLinalgSvdvals(a *Tensor, driver string)(retVal *Tensor) {
retVal, err := LinalgSvdvals(a, driver)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgSvdvalsOut(out *Tensor, a *Tensor, driver string)(retVal *Tensor) {
retVal, err := LinalgSvdvalsOut(out, a, driver)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorinv(ind int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorinv(ind, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorinvOut(out *Tensor, ind int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorinvOut(out, ind, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorsolve(other *Tensor, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorsolve(other, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorsolveOut(out, other, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgVander(x *Tensor, n []int64)(retVal *Tensor) {
retVal, err := LinalgVander(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgVecdot(x *Tensor, y *Tensor, dim int64)(retVal *Tensor) {
retVal, err := LinalgVecdot(x, y, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgVecdotOut(out *Tensor, x *Tensor, y *Tensor, dim int64)(retVal *Tensor) {
retVal, err := LinalgVecdotOut(out, x, y, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := Linear(input, weight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := LinearOut(out, input, weight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64)(retVal *Tensor) {
retVal, err := LinspaceOut(out, start, end, steps)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog(del bool)(retVal *Tensor) {
retVal, err := ts.Log(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog10(del bool)(retVal *Tensor) {
retVal, err := ts.Log10(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog10_()() {
err := ts.Log10_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLog10Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Log10Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog1p(del bool)(retVal *Tensor) {
retVal, err := ts.Log1p(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog1p_()() {
err := ts.Log1p_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLog1pOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Log1pOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog2(del bool)(retVal *Tensor) {
retVal, err := ts.Log2(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog2_()() {
err := ts.Log2_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLog2Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Log2Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog_()() {
err := ts.Log_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogNormal(mean float64, std float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogNormal(mean, std, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogNormal_(mean float64, std float64)() {
err := ts.LogNormal_(mean, std)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogNormalOut(out *Tensor, mean float64, std float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogNormalOut(out, mean, std, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoid(del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoid(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoidBackwardGradInput(gradInput, gradOutput, buffer, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoidOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
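// Hand-added sketch (not generated): MustLogSoftmax applies log-softmax along
// dim, optionally casting to dtype first. gotch.Double is assumed to be a
// valid DType constant, as in upstream gotch; logits is an illustrative
// placeholder tensor.
//
//    logProbs := logits.MustLogSoftmax(-1, gotch.Double, false)
//    // exponentiating logProbs gives rows that sum to 1 along the last dim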
func(ts *Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LogSoftmax(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSoftmaxIntOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LogSoftmaxIntOut(out, dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexp(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Logaddexp(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexp2(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Logaddexp2(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexp2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Logaddexp2Out(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogaddexpOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogcumsumexp(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Logcumsumexp(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.LogcumsumexpOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogdet(del bool)(retVal *Tensor) {
retVal, err := ts.Logdet(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalAnd(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalAnd(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalAnd_(other *Tensor)() {
err := ts.LogicalAnd_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalAndOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalAndOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalNot(del bool)(retVal *Tensor) {
retVal, err := ts.LogicalNot(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalNot_()() {
err := ts.LogicalNot_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalNotOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalNotOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalOr(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalOr(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalOr_(other *Tensor)() {
err := ts.LogicalOr_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalOrOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalXor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalXor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalXor_(other *Tensor)() {
err := ts.LogicalXor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalXorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalXorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogit(eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.Logit(eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogit_(eps []float64)() {
err := ts.Logit_(eps)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogitBackward(gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogitBackward(gradOutput, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogitBackwardGradInput(gradInput, gradOutput, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogitOut(out, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLogspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64)(retVal *Tensor) {
retVal, err := LogspaceOut(out, start, end, steps, base)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Logsumexp(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.LogsumexpOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
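// Hand-added sketch (not generated): MustLstm mirrors torch's functional lstm,
// taking the initial state as hx = [h0, c0] and a flat parameter list
// (w_ih, w_hh, b_ih, b_hh per layer), and returning (output, hN, cN). All
// tensors below are illustrative placeholders.
//
//    output, hN, cN := MustLstm(input, []*Tensor{h0, c0}, params, true, 1, 0.0, false, false, true)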
func MustLstm(input *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := Lstm(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLstmCell(input *Tensor, hx []*Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LstmCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLstmData(data *Tensor, batchSizes *Tensor, hx []*Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LstmData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustLt(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Lt(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLt_(other *Scalar)() {
err := ts.Lt_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LtScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLtTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LtTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLtTensor_(other *Tensor)() {
err := ts.LtTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LtTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLuSolve(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LuSolve(lUData, lUPivots, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLuUnpack(lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LuUnpack(lUData, lUPivots, unpackData, unpackPivots)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLuUnpackOut(p *Tensor, l *Tensor, u *Tensor, lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LuUnpackOut(p, l, u, lUData, lUPivots, unpackData, unpackPivots)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustMarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) {
retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
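// Hand-added sketch (not generated): MustMaskedFill returns a copy of ts with
// value written wherever the boolean mask is true; MustMaskedFill_ does the
// same in place. scores, mask, and the assumed FloatScalar helper are
// illustrative.
//
//    masked := scores.MustMaskedFill(mask, FloatScalar(-1e9), false)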
func(ts *Tensor) MustMaskedFill(mask *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedFill(mask, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedFill_(mask *Tensor, value *Scalar)() {
err := ts.MaskedFill_(mask, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMaskedFillScalarOut(out *Tensor, mask *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedFillScalarOut(out, mask, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedFillTensor(mask *Tensor, value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedFillTensor(mask, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedFillTensor_(mask *Tensor, value *Tensor)() {
err := ts.MaskedFillTensor_(mask, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMaskedFillTensorOut(out *Tensor, mask *Tensor, value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedFillTensorOut(out, mask, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedScatter(mask *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedScatter(mask, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedScatter_(mask *Tensor, source *Tensor)() {
err := ts.MaskedScatter_(mask, source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMaskedScatterOut(out *Tensor, mask *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedScatterOut(out, mask, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedSelect(mask *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedSelect(mask, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := MaskedSelectBackward(grad, input, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedSelectOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedSelectOut(out, mask, del)
if err != nil { log.Fatal(err) }
return retVal
}
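// Hand-added sketch (not generated): MustMatmul follows torch.matmul
// broadcasting, so a (b, n, k) tensor times a (k, m) weight gives (b, n, m).
// x and w are illustrative placeholders.
//
//    y := x.MustMatmul(w, false)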
func(ts *Tensor) MustMatmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Matmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MatmulOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixExp(del bool)(retVal *Tensor) {
retVal, err := ts.MatrixExp(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixExpBackward(grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixExpBackward(grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixH(del bool)(retVal *Tensor) {
retVal, err := ts.MatrixH(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixPower(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixPower(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixPowerOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMax(del bool)(retVal *Tensor) {
retVal, err := ts.Max(del)
if err != nil { log.Fatal(err) }
return retVal
}
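// Hand-added sketch (not generated): MustMaxDim reduces along dim and returns
// the max values together with their argmax indices, as in torch.max(dim=...).
// x is an illustrative placeholder.
//
//    values, indices := x.MustMaxDim(1, false, false) // per-row max and argmax for a 2-D x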
func(ts *Tensor) MustMaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxDimMax(max, maxValues, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxOther(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxOther(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool1dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool1dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
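// Hand-added sketch (not generated): MustMaxPool2d expects NCHW input; the
// call below is the common 2x2, stride-2 pooling with no padding. img is an
// illustrative placeholder of shape [n, c, h, w].
//
//    pooled := img.MustMaxPool2d([]int64{2, 2}, []int64{2, 2}, []int64{0, 0}, []int64{1, 1}, false, false)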
func(ts *Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2dBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dBackwardOut(out *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2dBackwardOut(out, gradOutput, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool2dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2dWithIndicesBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool2dWithIndicesOut(out, indices, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool3dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool3dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool3dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool3dWithIndicesBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool3dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool3dWithIndicesOut(out, indices, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxUnaryOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnaryOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool2d(indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool2d(indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool3d(indices, outputSize, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaximum(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Maximum(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaximumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaximumOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMean(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Mean(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
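// Hand-added sketch (not generated): MustMeanDim averages over the listed
// dims, keeping them as size-1 axes when keepdim is true. gotch.Float is
// assumed to be a valid DType constant; x is an illustrative placeholder.
//
//    rowMeans := x.MustMeanDim([]int64{1}, true, gotch.Float, false)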
func(ts *Tensor) MustMeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.MeanDim(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMedian(del bool)(retVal *Tensor) {
retVal, err := ts.Median(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MedianDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MedianDimValues(values, indices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMedianOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MedianOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMh(del bool)(retVal *Tensor) {
retVal, err := ts.Mh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMin(del bool)(retVal *Tensor) {
retVal, err := ts.Min(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MinDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MinDimMin(min, minIndices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMinOther(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MinOther(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MinOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinUnaryOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MinUnaryOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinimum(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Minimum(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinimumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MinimumOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := MiopenBatchNorm(input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustMiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := MiopenBatchNormBackward(input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustMiopenBatchNormBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := MiopenBatchNormBackwardOut(out0, out1, out2, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustMiopenBatchNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := MiopenBatchNormOut(out0, out1, out2, input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustMiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionAddRelu(weight, z, alpha, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionOut(out, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionRelu(weight, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionTransposeOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionTransposeOut(out, weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenDepthwiseConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenDepthwiseConvolutionOut(out, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenRnn(input *Tensor, weight []*Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, err := MiopenRnn(input, weight, weightStride0, hx, cx, mode, hiddenSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4
}
func MustMiopenRnnOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, input *Tensor, weight []*Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, err := MiopenRnnOut(out0, out1, out2, out3, out4, input, weight, weightStride0, hx, cx, mode, hiddenSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4
}
func(ts *Tensor) MustMish(del bool)(retVal *Tensor) {
retVal, err := ts.Mish(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMish_()() {
err := ts.Mish_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMishBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MishBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMishOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MishOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnAdaptiveAvgPool2dBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnAdaptiveAvgPool2dBackwardOut(out *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnAdaptiveAvgPool2dBackwardOut(out, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnAdaptiveAvgPool2dOut(out, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnConvolutionOut(out *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnConvolutionOut(out, weight, bias, padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnLinear(weight *Tensor, bias *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnLinear(weight, bias, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Tensor)(retVal *Tensor) {
retVal, err := MkldnnLinearBackwardInput(inputSize, gradOutput, weight)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnLinearBackwardInputOut(out *Tensor, inputSize []int64, gradOutput *Tensor, weight *Tensor)(retVal *Tensor) {
retVal, err := MkldnnLinearBackwardInputOut(out, inputSize, gradOutput, weight)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := MkldnnLinearBackwardWeights(gradOutput, input, weight, biasDefined)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustMkldnnLinearBackwardWeightsOut(out0 *Tensor, out1 *Tensor, gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := MkldnnLinearBackwardWeightsOut(out0, out1, gradOutput, input, weight, biasDefined)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMkldnnLinearOut(out *Tensor, weight *Tensor, bias *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnLinearOut(out, weight, bias, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnMaxPool2dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) {
retVal, err := MkldnnMaxPool2dBackward(gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnMaxPool2dBackwardOut(out *Tensor, gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) {
retVal, err := MkldnnMaxPool2dBackwardOut(out, gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnMaxPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnMaxPool2dOut(out, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnMaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnMaxPool3dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) {
retVal, err := MkldnnMaxPool3dBackward(gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnMaxPool3dBackwardOut(out *Tensor, gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) {
retVal, err := MkldnnMaxPool3dBackwardOut(out, gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnMaxPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnMaxPool3dOut(out, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, inputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, inputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnReorderConv2dWeightOut(out *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, inputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnReorderConv2dWeightOut(out, padding, stride, dilation, groups, inputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnReorderConv3dWeight(padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnReorderConv3dWeightOut(out *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnReorderConv3dWeightOut(out, padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnRnnLayer(input *Tensor, weight0 *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, hx_ *Tensor, cx_ *Tensor, reverse bool, batchSizes []int64, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, bidirectional bool, batchFirst bool, train bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := MkldnnRnnLayer(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batchSizes, mode, hiddenSize, numLayers, hasBiases, bidirectional, batchFirst, train)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustMkldnnRnnLayerBackward(input *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, weight4 *Tensor, hx_ *Tensor, cxTmp *Tensor, output *Tensor, hy_ *Tensor, cy_ *Tensor, gradOutput *Tensor, gradHy *Tensor, gradCy *Tensor, reverse bool, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, train bool, bidirectional bool, batchSizes []int64, batchFirst bool, workspace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor, retVal6 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err := MkldnnRnnLayerBackward(input, weight1, weight2, weight3, weight4, hx_, cxTmp, output, hy_, cy_, gradOutput, gradHy, gradCy, reverse, mode, hiddenSize, numLayers, hasBiases, train, bidirectional, batchSizes, batchFirst, workspace)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6
}
func MustMkldnnRnnLayerBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, out4 *Tensor, out5 *Tensor, out6 *Tensor, input *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, weight4 *Tensor, hx_ *Tensor, cxTmp *Tensor, output *Tensor, hy_ *Tensor, cy_ *Tensor, gradOutput *Tensor, gradHy *Tensor, gradCy *Tensor, reverse bool, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, train bool, bidirectional bool, batchSizes []int64, batchFirst bool, workspace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor, retVal5 *Tensor, retVal6 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err := MkldnnRnnLayerBackwardOut(out0, out1, out2, out3, out4, out5, out6, input, weight1, weight2, weight3, weight4, hx_, cxTmp, output, hy_, cy_, gradOutput, gradHy, gradCy, reverse, mode, hiddenSize, numLayers, hasBiases, train, bidirectional, batchSizes, batchFirst, workspace)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6
}
func MustMkldnnRnnLayerOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, input *Tensor, weight0 *Tensor, weight1 *Tensor, weight2 *Tensor, weight3 *Tensor, hx_ *Tensor, cx_ *Tensor, reverse bool, batchSizes []int64, mode int64, hiddenSize int64, numLayers int64, hasBiases bool, bidirectional bool, batchFirst bool, train bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := MkldnnRnnLayerOut(out0, out1, out2, out3, input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batchSizes, mode, hiddenSize, numLayers, hasBiases, bidirectional, batchFirst, train)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func(ts *Tensor) MustMm(mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Mm(mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MmOut(out, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
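// Hedged usage sketch (hand-written, not generated): MustMm multiplies the receiver by
// mat2 and terminates the process via log.Fatal on any libtorch error, like every other
// Must* wrapper in this file. The del flag, as elsewhere here, frees the receiver's
// underlying C tensor after the call. gotch.Float and gotch.CPU are assumed to be the
// dtype/device constants exported by the imported gotch package.
func exampleMustMm() *Tensor {
	a := MustRand([]int64{2, 3}, gotch.Float, gotch.CPU) // 2x3 random matrix
	b := MustRand([]int64{3, 4}, gotch.Float, gotch.CPU) // 3x4 random matrix
	return a.MustMm(b, true)                             // 2x4 product; del=true releases a after the call
}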
func(ts *Tensor) MustMode(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Mode(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustModeValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.ModeValues(values, indices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMoveaxis(source []int64, destination []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Moveaxis(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMoveaxisInt(source int64, destination int64, del bool)(retVal *Tensor) {
retVal, err := ts.MoveaxisInt(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMovedim(source []int64, destination []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Movedim(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMovedimInt(source int64, destination int64, del bool)(retVal *Tensor) {
retVal, err := ts.MovedimInt(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLoss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLossBackwardGradInput(gradInput, gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLossOut(out, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMsort(del bool)(retVal *Tensor) {
retVal, err := ts.Msort(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMsortOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MsortOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMt(del bool)(retVal *Tensor) {
retVal, err := ts.Mt(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Mul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMul_(other *Tensor)() {
err := ts.Mul_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MulOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMulScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MulScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMulScalar_(other *Scalar)() {
err := ts.MulScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMulScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MulScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultiMarginLossBackwardGradInput(gradInput, gradOutput, target, p, margin, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLoss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLossBackwardGradInput(gradInput, gradOutput, target, reduction, isTarget, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultinomial(numSamples int64, replacement bool, del bool)(retVal *Tensor) {
retVal, err := ts.Multinomial(numSamples, replacement, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool)(retVal *Tensor) {
retVal, err := ts.MultinomialOut(out, numSamples, replacement, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiply(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Multiply(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiply_(other *Tensor)() {
err := ts.Multiply_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMultiplyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MultiplyOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiplyScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MultiplyScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiplyScalar_(other *Scalar)() {
err := ts.MultiplyScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMv(vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Mv(vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMvOut(out *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MvOut(out, vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMvlgamma(p int64, del bool)(retVal *Tensor) {
retVal, err := ts.Mvlgamma(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMvlgamma_(p int64)() {
err := ts.Mvlgamma_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMvlgammaOut(out *Tensor, p int64, del bool)(retVal *Tensor) {
retVal, err := ts.MvlgammaOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanToNum(nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor) {
retVal, err := ts.NanToNum(nan, posinf, neginf, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanToNum_(nan []float64, posinf []float64, neginf []float64)() {
err := ts.NanToNum_(nan, posinf, neginf)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNanToNumOut(out *Tensor, nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor) {
retVal, err := ts.NanToNumOut(out, nan, posinf, neginf, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmean(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Nanmean(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NanmeanOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmedian(del bool)(retVal *Tensor) {
retVal, err := ts.Nanmedian(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.NanmedianDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustNanmedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.NanmedianDimValues(values, indices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustNanmedianOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NanmedianOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.Nanquantile(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileScalar(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileScalarOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNansum(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Nansum(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNansumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NansumOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrow(dim int64, start int64, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.Narrow(dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.NarrowCopy(dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrowCopyOut(out *Tensor, dim int64, start int64, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.NarrowCopyOut(out, dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrowTensor(dim int64, start *Tensor, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.NarrowTensor(dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeBatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustNativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeBatchNormOut(out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustNativeChannelShuffle(groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.NativeChannelShuffle(groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNativeDropout(input *Tensor, p float64, train bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := NativeDropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustNativeDropoutBackward(gradOutput *Tensor, mask *Tensor, scale float64)(retVal *Tensor) {
retVal, err := NativeDropoutBackward(gradOutput, mask, scale)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNativeDropoutBackwardOut(out *Tensor, gradOutput *Tensor, mask *Tensor, scale float64)(retVal *Tensor) {
retVal, err := NativeDropoutBackwardOut(out, gradOutput, mask, scale)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNativeDropoutOut(out0 *Tensor, out1 *Tensor, input *Tensor, p float64, train bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := NativeDropoutOut(out0, out1, input, p, train)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustNativeGroupNorm(input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeGroupNorm(input, weight, bias, n, c, hxW, group, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustNativeGroupNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeGroupNormOut(out0, out1, out2, input, weight, bias, n, c, hxW, group, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustNativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeLayerNorm(input, normalizedShape, weight, bias, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustNativeLayerNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeLayerNormOut(out0, out1, out2, input, normalizedShape, weight, bias, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustNativeNorm(del bool)(retVal *Tensor) {
retVal, err := ts.NativeNorm(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNativeNormOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NativeNormOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNativeNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NativeNormScalaroptDimDtype(p, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNativeNormScalaroptDimDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NativeNormScalaroptDimDtypeOut(out, p, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNe(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Ne(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNe_(other *Scalar)() {
err := ts.Ne_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.NeScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NeTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeTensor_(other *Tensor)() {
err := ts.NeTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NeTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeg(del bool)(retVal *Tensor) {
retVal, err := ts.Neg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeg_()() {
err := ts.Neg_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNegOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NegOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNegative(del bool)(retVal *Tensor) {
retVal, err := ts.Negative(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNegative_()() {
err := ts.Negative_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNegativeOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NegativeOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNestedToPaddedTensor(padding float64, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.NestedToPaddedTensor(padding, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewEmptyOut(out *Tensor, size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.NewEmptyOut(out, size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewEmptyStrided(size, stride, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewEmptyStridedOut(out *Tensor, size []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.NewEmptyStridedOut(out, size, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewFullOut(out *Tensor, size []int64, fillValue *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.NewFullOut(out, size, fillValue, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewOnes(size, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewOnesOut(out *Tensor, size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.NewOnesOut(out, size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewZerosOut(out *Tensor, size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.NewZerosOut(out, size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNextafter(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Nextafter(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNextafter_(other *Tensor)() {
err := ts.Nextafter_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNextafterOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NextafterOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2dBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossNd(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossNd(target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNonzero(del bool)(retVal *Tensor) {
retVal, err := ts.Nonzero(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNonzeroOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NonzeroOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNonzeroStatic(size int64, fillValue int64, del bool)(retVal *Tensor) {
retVal, err := ts.NonzeroStatic(size, fillValue, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNonzeroStaticOut(out *Tensor, size int64, fillValue int64, del bool)(retVal *Tensor) {
retVal, err := ts.NonzeroStaticOut(out, size, fillValue, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNorm(del bool)(retVal *Tensor) {
retVal, err := ts.Norm(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NormDtypeOut(out, p, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNormExceptDim(v *Tensor, pow int64, dim int64)(retVal *Tensor) {
retVal, err := NormExceptDim(v, pow, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NormOut(out, p, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalarOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalarOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalaroptDim(p, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalaroptDimDtype(p, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalaroptDtype(p, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalaroptDtypeOut(out *Tensor, p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalaroptDtypeOut(out, p, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormal_(mean float64, std float64)() {
err := ts.Normal_(mean, std)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNormalFunctional(mean float64, std float64, del bool)(retVal *Tensor) {
retVal, err := ts.NormalFunctional(mean, std, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqual(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqual(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqual_(other *Scalar)() {
err := ts.NotEqual_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNotEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqualScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqualTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqualTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqualTensor_(other *Tensor)() {
err := ts.NotEqualTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNotEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqualTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNorm(keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNorm(keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNormDim(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNormDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNormDimOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNormOut(out *Tensor, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNormOut(out, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNumpyT(del bool)(retVal *Tensor) {
retVal, err := ts.NumpyT(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOneHot(numClasses int64, del bool)(retVal *Tensor) {
retVal, err := ts.OneHot(numClasses, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Ones(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
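// Hedged usage sketch (hand-written, not generated): MustOnes builds a tensor of ones
// with the given shape, dtype and device, panicking through log.Fatal on failure.
// gotch.Int64 and gotch.CPU are assumed constants of the imported gotch package.
func exampleMustOnes() *Tensor {
	return MustOnes([]int64{4, 4}, gotch.Int64, gotch.CPU) // 4x4 tensor of ones
}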
func(ts *Tensor) MustOnesLike(del bool)(retVal *Tensor) {
retVal, err := ts.OnesLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOnesLikeOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.OnesLikeOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustOnesOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := OnesOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrgqr(input2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Orgqr(input2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrgqrOut(out *Tensor, input2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.OrgqrOut(out, input2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrmqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) {
retVal, err := ts.Ormqr(input2, input3, left, transpose, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) {
retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOuter(vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Outer(vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOuterOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.OuterOut(out, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOutputNr(del bool)(retVal int64) {
retVal, err := ts.OutputNr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPad(pad []int64, mode string, value []float64, del bool)(retVal *Tensor) {
retVal, err := ts.Pad(pad, mode, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPadSequence(sequences []*Tensor, batchFirst bool, paddingValue float64)(retVal *Tensor) {
retVal, err := PadSequence(sequences, batchFirst, paddingValue)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool)(retVal *Tensor) {
retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPdist(p float64, del bool)(retVal *Tensor) {
retVal, err := ts.Pdist(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPermute(dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Permute(dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPermuteCopy(dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.PermuteCopy(dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPermuteCopyOut(out *Tensor, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.PermuteCopyOut(out, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPinMemory(device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.PinMemory(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPinverse(rcond float64, del bool)(retVal *Tensor) {
retVal, err := ts.Pinverse(rcond, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPixelShuffle(upscaleFactor int64, del bool)(retVal *Tensor) {
retVal, err := ts.PixelShuffle(upscaleFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPixelShuffleOut(out *Tensor, upscaleFactor int64, del bool)(retVal *Tensor) {
retVal, err := ts.PixelShuffleOut(out, upscaleFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPixelUnshuffle(downscaleFactor int64, del bool)(retVal *Tensor) {
retVal, err := ts.PixelUnshuffle(downscaleFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPixelUnshuffleOut(out *Tensor, downscaleFactor int64, del bool)(retVal *Tensor) {
retVal, err := ts.PixelUnshuffleOut(out, downscaleFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPoisson(del bool)(retVal *Tensor) {
retVal, err := ts.Poisson(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal *Tensor) {
retVal, err := PoissonNllLoss(input, target, logInput, full, eps, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPoissonOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.PoissonOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPolar(abs *Tensor, angle *Tensor)(retVal *Tensor) {
retVal, err := Polar(abs, angle)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPolarOut(out *Tensor, abs *Tensor, angle *Tensor)(retVal *Tensor) {
retVal, err := PolarOut(out, abs, angle)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPolygamma(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.Polygamma(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPolygamma_(n int64)() {
err := ts.Polygamma_(n)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.PolygammaOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPositive(del bool)(retVal *Tensor) {
retVal, err := ts.Positive(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPow(exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Pow(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPow_(exponent *Scalar)() {
err := ts.Pow_(exponent)
if err != nil { log.Fatal(err) }
return
}
func MustPowScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := PowScalar(selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := PowScalarOut(out, selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPowTensor_(exponent *Tensor)() {
err := ts.PowTensor_(exponent)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustPowTensorScalar(exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.PowTensorScalar(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPowTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.PowTensorScalarOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPowTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.PowTensorTensorOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPrelu(weight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Prelu(weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustProd(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Prod(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ProdDimInt(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ProdIntOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustProdOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ProdOut(out, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPut(index *Tensor, source *Tensor, accumulate bool, del bool)(retVal *Tensor) {
retVal, err := ts.Put(index, source, accumulate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool)() {
err := ts.Put_(index, source, accumulate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustPutOut(out *Tensor, index *Tensor, source *Tensor, accumulate bool, del bool)(retVal *Tensor) {
retVal, err := ts.PutOut(out, index, source, accumulate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQPerChannelAxis(del bool)(retVal int64) {
retVal, err := ts.QPerChannelAxis(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQPerChannelScales(del bool)(retVal *Tensor) {
retVal, err := ts.QPerChannelScales(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQPerChannelScalesOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.QPerChannelScalesOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQPerChannelZeroPoints(del bool)(retVal *Tensor) {
retVal, err := ts.QPerChannelZeroPoints(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQPerChannelZeroPointsOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.QPerChannelZeroPointsOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQScale(del bool)(retVal float64) {
retVal, err := ts.QScale(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQZeroPoint(del bool)(retVal int64) {
retVal, err := ts.QZeroPoint(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQr(some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Qr(some, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustQrQ(q *Tensor, r *Tensor, some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.QrQ(q, r, some, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustQuantile(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.Quantile(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileScalar(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileScalarOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerChannelOut(out *Tensor, scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerChannelOut(out, scales, zeroPoints, axis, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerTensorDynamic(dtype gotch.DType, reduceRange bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerTensorDynamic(dtype, reduceRange, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerTensorDynamicOut(out *Tensor, dtype gotch.DType, reduceRange bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerTensorDynamicOut(out, dtype, reduceRange, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerTensorTensorQparams(scale, zeroPoint, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor) {
retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedBatchNormOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor) {
retVal, err := QuantizedBatchNormOut(out, input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedLstmCell(input *Tensor, hx []*Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := QuantizedLstmCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustQuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizedMaxPool1dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool1dOut(out, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizedMaxPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool2dOut(out, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizedMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizedMaxPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool3dOut(out, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRad2deg(del bool)(retVal *Tensor) {
retVal, err := ts.Rad2deg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRad2deg_()() {
err := ts.Rad2deg_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRad2degOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Rad2degOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Rand(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandLike(del bool)(retVal *Tensor) {
retVal, err := ts.RandLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandLikeOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RandLikeOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := RandOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Randint(high, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandintLike(high int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandintLike(high, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandintLikeLowDtype(low int64, high int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandintLikeLowDtype(low, high, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandintLikeLowDtypeOut(out *Tensor, low int64, high int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandintLikeLowDtypeOut(out, low, high, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandintLikeOut(out *Tensor, high int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandintLikeOut(out, high, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := RandintLow(low, high, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandintLowOut(out *Tensor, low int64, high int64, size []int64)(retVal *Tensor) {
retVal, err := RandintLowOut(out, low, high, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandintOut(out *Tensor, high int64, size []int64)(retVal *Tensor) {
retVal, err := RandintOut(out, high, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Randn(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
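// Hedged usage sketch (hand-written, not generated): MustRandn samples a tensor from a
// standard normal distribution; combined with MustMulScalar it yields a scaled sample.
// FloatScalar is assumed to be this package's float-to-*Scalar constructor, and
// gotch.Float/gotch.CPU are assumed gotch constants.
func exampleMustRandn() *Tensor {
	x := MustRandn([]int64{3}, gotch.Float, gotch.CPU) // 3-element standard-normal sample
	return x.MustMulScalar(FloatScalar(0.5), true)     // scale by 0.5; del=true frees x
}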
func(ts *Tensor) MustRandnLike(del bool)(retVal *Tensor) {
retVal, err := ts.RandnLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandnLikeOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RandnLikeOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandnOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := RandnOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandom(del bool)(retVal *Tensor) {
retVal, err := ts.Random(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandom_()() {
err := ts.Random_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRandomFrom(from int64, to []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandomFrom(from, to, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandomFrom_(from int64, to []int64)() {
err := ts.RandomFrom_(from, to)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRandomFromOut(out *Tensor, from int64, to []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandomFromOut(out, from, to, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandomOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RandomOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandomTo(to int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandomTo(to, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandomTo_(to int64)() {
err := ts.RandomTo_(to)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRandomToOut(out *Tensor, to int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandomToOut(out, to, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Randperm(n, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandpermOut(out *Tensor, n int64)(retVal *Tensor) {
retVal, err := RandpermOut(out, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRange(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Range(start, end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRangeOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) {
retVal, err := RangeOut(out, start, end)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRangeOut_(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) {
retVal, err := RangeOut_(out, start, end)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := RangeStep(start, end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRavel(del bool)(retVal *Tensor) {
retVal, err := ts.Ravel(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReal(del bool)(retVal *Tensor) {
retVal, err := ts.Real(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReciprocal(del bool)(retVal *Tensor) {
retVal, err := ts.Reciprocal(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReciprocal_()() {
err := ts.Reciprocal_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustReciprocalOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ReciprocalOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRelu(del bool)(retVal *Tensor) {
retVal, err := ts.Relu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRelu6(del bool)(retVal *Tensor) {
retVal, err := ts.Relu6(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRelu6_()() {
err := ts.Relu6_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRelu_()() {
err := ts.Relu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustReluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ReluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRemainder(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Remainder(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRemainder_(other *Scalar)() {
err := ts.Remainder_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRemainderScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.RemainderScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRemainderScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := RemainderScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRemainderScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := RemainderScalarTensorOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRemainderTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RemainderTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRemainderTensor_(other *Tensor)() {
err := ts.RemainderTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRemainderTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RemainderTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRenorm(p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Renorm(p, dim, maxnorm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRenorm_(p *Scalar, dim int64, maxnorm *Scalar)() {
err := ts.Renorm_(p, dim, maxnorm)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.RenormOut(out, p, dim, maxnorm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRepeat(repeats []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Repeat(repeats, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRepeatInterleave(repeats *Tensor, outputSize []int64)(retVal *Tensor) {
retVal, err := RepeatInterleave(repeats, outputSize)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RepeatInterleaveSelfInt(repeats, dim, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RepeatInterleaveSelfTensor(repeats, dim, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRepeatInterleaveTensorOut(out *Tensor, repeats *Tensor, outputSize []int64)(retVal *Tensor) {
retVal, err := RepeatInterleaveTensorOut(out, repeats, outputSize)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRepeatOut(out *Tensor, repeats []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RepeatOut(out, repeats, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRequiresGrad_(requiresGrad bool)() {
err := ts.RequiresGrad_(requiresGrad)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustReshape(shape []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Reshape(shape, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReshapeAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ReshapeAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResize(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Resize(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResize_(size []int64)() {
err := ts.Resize_(size)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustResizeAs(theTemplate *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ResizeAs(theTemplate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResizeAs_(theTemplate *Tensor)() {
err := ts.ResizeAs_(theTemplate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustResizeAsOut(out *Tensor, theTemplate *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ResizeAsOut(out, theTemplate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResizeAsSparse(theTemplate *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ResizeAsSparse(theTemplate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResizeAsSparse_(theTemplate *Tensor)() {
err := ts.ResizeAsSparse_(theTemplate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustResizeAsSparseOut(out *Tensor, theTemplate *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ResizeAsSparseOut(out, theTemplate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResizeOut(out *Tensor, size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ResizeOut(out, size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResolveConj(del bool)(retVal *Tensor) {
retVal, err := ts.ResolveConj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResolveNeg(del bool)(retVal *Tensor) {
retVal, err := ts.ResolveNeg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRetainsGrad(del bool)(retVal bool) {
retVal, err := ts.RetainsGrad(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRnnRelu(input *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnRelu(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnReluData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustRnnTanh(input *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnTanh(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []*Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnTanhData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustRoll(shifts []int64, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Roll(shifts, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRollOut(out *Tensor, shifts []int64, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RollOut(out, shifts, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRot90(k int64, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Rot90(k, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRot90Out(out *Tensor, k int64, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Rot90Out(out, k, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRound(del bool)(retVal *Tensor) {
retVal, err := ts.Round(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRound_()() {
err := ts.Round_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRoundDecimals(decimals int64, del bool)(retVal *Tensor) {
retVal, err := ts.RoundDecimals(decimals, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRoundDecimals_(decimals int64)() {
err := ts.RoundDecimals_(decimals)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRoundDecimalsOut(out *Tensor, decimals int64, del bool)(retVal *Tensor) {
retVal, err := ts.RoundDecimalsOut(out, decimals, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRoundOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RoundOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRowIndices(del bool)(retVal *Tensor) {
retVal, err := ts.RowIndices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRowIndicesCopy(del bool)(retVal *Tensor) {
retVal, err := ts.RowIndicesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRowIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RowIndicesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRowStack(tensors []*Tensor)(retVal *Tensor) {
retVal, err := RowStack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRowStackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor) {
retVal, err := RowStackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRrelu(training bool, del bool)(retVal *Tensor) {
retVal, err := ts.Rrelu(training, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRrelu_(training bool)() {
err := ts.Rrelu_(training)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRreluWithNoise(noise *Tensor, training bool, del bool)(retVal *Tensor) {
retVal, err := ts.RreluWithNoise(noise, training, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRreluWithNoise_(noise *Tensor, training bool)() {
err := ts.RreluWithNoise_(noise, training)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor) {
retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRreluWithNoiseBackwardOut(out *Tensor, gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor) {
retVal, err := ts.RreluWithNoiseBackwardOut(out, gradOutput, noise, lower, upper, training, selfIsResult, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool)(retVal *Tensor) {
retVal, err := ts.RreluWithNoiseOut(out, noise, training, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsqrt(del bool)(retVal *Tensor) {
retVal, err := ts.Rsqrt(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsqrt_()() {
err := ts.Rsqrt_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRsqrtOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RsqrtOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsub(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Rsub(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsubScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.RsubScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsubScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.RsubScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsubTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RsubTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := ScalarTensor(s, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustScalarTensorOut(out *Tensor, s *Scalar)(retVal *Tensor) {
retVal, err := ScalarTensorOut(out, s)
if err != nil { log.Fatal(err) }
return retVal
}
func MustScaledDotProductAttention(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor, dropoutP float64, isCausal bool, scale []float64)(retVal *Tensor) {
retVal, err := ScaledDotProductAttention(query, key, value, attnMask, dropoutP, isCausal, scale)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatter(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Scatter(dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatter_(dim int64, index *Tensor, src *Tensor)() {
err := ts.Scatter_(dim, index, src)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterAdd(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterAdd(dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterAdd_(dim int64, index *Tensor, src *Tensor)() {
err := ts.ScatterAdd_(dim, index, src)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterAddOut(out, dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterReduce(dim, index, src, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string)() {
err := ts.ScatterReduce_(dim, index, src, reduce)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterReduceOut(out, dim, index, src, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterSrcOut(out, dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValue(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValue(dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValue_(dim int64, index *Tensor, value *Scalar)() {
err := ts.ScatterValue_(dim, index, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValueOut(out, dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValueReduce(dim, index, value, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValueReduce_(dim int64, index *Tensor, value *Scalar, reduce string)() {
err := ts.ScatterValueReduce_(dim, index, value, reduce)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValueReduceOut(out, dim, index, value, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSearchsorted(sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Searchsorted(sortedSequence, outInt32, right, side, sorter, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool, side string, sorter *Tensor)(retVal *Tensor) {
retVal, err := SearchsortedScalar(sortedSequence, selfScalar, outInt32, right, side, sorter)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSearchsortedScalarOut(out *Tensor, sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool, side string, sorter *Tensor)(retVal *Tensor) {
retVal, err := SearchsortedScalarOut(out, sortedSequence, selfScalar, outInt32, right, side, sorter)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, side string, sorter *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SearchsortedTensorOut(out, sortedSequence, outInt32, right, side, sorter, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, offsets *Tensor, axis int64, unsafety bool, initial *Scalar)(retVal *Tensor) {
retVal, err := SegmentReduce(data, reduce, lengths, indices, offsets, axis, unsafety, initial)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSegmentReduceOut(out *Tensor, data *Tensor, reduce string, lengths *Tensor, indices *Tensor, offsets *Tensor, axis int64, unsafety bool, initial *Scalar)(retVal *Tensor) {
retVal, err := SegmentReduceOut(out, data, reduce, lengths, indices, offsets, axis, unsafety, initial)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelect(dim int64, index int64, del bool)(retVal *Tensor) {
retVal, err := ts.Select(dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor) {
retVal, err := SelectBackward(gradOutput, inputSizes, dim, index)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSelectBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor) {
retVal, err := SelectBackwardOut(out, gradOutput, inputSizes, dim, index)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelectCopy(dim int64, index int64, del bool)(retVal *Tensor) {
retVal, err := ts.SelectCopy(dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelectCopyIntOut(out *Tensor, dim int64, index int64, del bool)(retVal *Tensor) {
retVal, err := ts.SelectCopyIntOut(out, dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelectScatter(src *Tensor, dim int64, index int64, del bool)(retVal *Tensor) {
retVal, err := ts.SelectScatter(src, dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelectScatterOut(out *Tensor, src *Tensor, dim int64, index int64, del bool)(retVal *Tensor) {
retVal, err := ts.SelectScatterOut(out, src, dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelu(del bool)(retVal *Tensor) {
retVal, err := ts.Selu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelu_()() {
err := ts.Selu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSet(del bool)(retVal *Tensor) {
retVal, err := ts.Set(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSet_()() {
err := ts.Set_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSetOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SetOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSetRequiresGrad(r bool, del bool)(retVal *Tensor) {
retVal, err := ts.SetRequiresGrad(r, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSetSourceTensor(source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SetSourceTensor(source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSetSourceTensor_(source *Tensor)() {
err := ts.SetSourceTensor_(source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSetSourceTensorOut(out *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SetSourceTensorOut(out, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSetSourceTensorStorageOffset_(source *Tensor, storageOffset int64, size []int64, stride []int64)() {
err := ts.SetSourceTensorStorageOffset_(source, storageOffset, size, stride)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSgn(del bool)(retVal *Tensor) {
retVal, err := ts.Sgn(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSgn_()() {
err := ts.Sgn_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSgnOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SgnOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSigmoid(del bool)(retVal *Tensor) {
retVal, err := ts.Sigmoid(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSigmoid_()() {
err := ts.Sigmoid_()
if err != nil { log.Fatal(err) }
return
}
func MustSigmoidBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := SigmoidBackward(gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := SigmoidBackwardGradInput(gradInput, gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SigmoidOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSign(del bool)(retVal *Tensor) {
retVal, err := ts.Sign(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSign_()() {
err := ts.Sign_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSignOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SignOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSignbit(del bool)(retVal *Tensor) {
retVal, err := ts.Signbit(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSignbitOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SignbitOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSilu(del bool)(retVal *Tensor) {
retVal, err := ts.Silu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSilu_()() {
err := ts.Silu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSiluBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SiluBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SiluBackwardGradInput(gradInput, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSiluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SiluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSin(del bool)(retVal *Tensor) {
retVal, err := ts.Sin(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSin_()() {
err := ts.Sin_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSinOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SinOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinc(del bool)(retVal *Tensor) {
retVal, err := ts.Sinc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinc_()() {
err := ts.Sinc_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSincOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SincOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinh(del bool)(retVal *Tensor) {
retVal, err := ts.Sinh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinh_()() {
err := ts.Sinh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSinhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SinhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlice(dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.Slice(dim, start, end, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor) {
retVal, err := SliceBackward(gradOutput, inputSizes, dim, start, end, step)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSliceBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor) {
retVal, err := SliceBackwardOut(out, gradOutput, inputSizes, dim, start, end, step)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSliceCopy(dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.SliceCopy(dim, start, end, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSliceCopyTensorOut(out *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.SliceCopyTensorOut(out, dim, start, end, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSliceScatter(src *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.SliceScatter(src, dim, start, end, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSliceScatterOut(out *Tensor, src *Tensor, dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.SliceScatterOut(out, src, dim, start, end, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Slogdet(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSlogdetOut(sign *Tensor, logabsdet *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SlogdetOut(sign, logabsdet, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvDilated2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvDilated2dOut(out, weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvDilated3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvDilated3dOut(out, weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmm(mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Smm(mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1Loss(target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1LossBackwardGradInput(gradInput, gradOutput, target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1LossOut(out, target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLoss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLossBackwardGradInput(gradInput, gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLossOut(out, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Softmax(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftmaxIntOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SoftmaxIntOut(out, dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplus(del bool)(retVal *Tensor) {
retVal, err := ts.Softplus(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SoftplusBackwardGradInput(gradInput, gradOutput, beta, threshold, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplusOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SoftplusOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrink(del bool)(retVal *Tensor) {
retVal, err := ts.Softshrink(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SoftshrinkBackwardGradInput(gradInput, gradOutput, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrinkOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SoftshrinkOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSort(dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Sort(dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSortStable(stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SortStable(stable, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SortValues(values, indices, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SortValuesStable(values, indices, stable, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustSparseBscTensor(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseBscTensor(ccolIndices, rowIndices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseBscTensorCcolRowValueSize(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseBscTensorCcolRowValueSize(ccolIndices, rowIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseBsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseBsrTensor(crowIndices, colIndices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseBsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseBsrTensorCrowColValueSize(crowIndices, colIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCompressedTensor(compressedIndices *Tensor, plainIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCompressedTensor(compressedIndices, plainIndices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCompressedTensorCompPlainValueSize(compressedIndices *Tensor, plainIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCompressedTensorCompPlainValueSize(compressedIndices, plainIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCooTensor(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device, isCoalesced bool)(retVal *Tensor) {
retVal, err := SparseCooTensorIndices(indices, values, optionsKind, optionsDevice, isCoalesced)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, isCoalesced bool)(retVal *Tensor) {
retVal, err := SparseCooTensorIndicesSize(indices, values, size, optionsKind, optionsDevice, isCoalesced)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCooTensorSizeOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := SparseCooTensorSizeOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCscTensor(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCscTensor(ccolIndices, rowIndices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCscTensorCcolRowValueSize(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCscTensorCcolRowValueSize(ccolIndices, rowIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCsrTensor(crowIndices, colIndices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCsrTensorCrowColValueSize(crowIndices, colIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseDim(del bool)(retVal int64) {
retVal, err := ts.SparseDim(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseMask(mask *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SparseMask(mask, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseMaskOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SparseMaskOut(out, mask, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseResize(size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SparseResize(size, sparseDim, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64)() {
err := ts.SparseResize_(size, sparseDim, denseDim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSparseResizeAndClear(size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SparseResizeAndClear(size, sparseDim, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)() {
err := ts.SparseResizeAndClear_(size, sparseDim, denseDim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSparseResizeAndClearOut(out *Tensor, size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SparseResizeAndClearOut(out, size, sparseDim, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseResizeOut(out *Tensor, size []int64, sparseDim int64, denseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SparseResizeOut(out, size, sparseDim, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseSampledAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SparseSampledAddmm(mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseSampledAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SparseSampledAddmmOut(out, mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialAiryAi(x *Tensor)(retVal *Tensor) {
retVal, err := SpecialAiryAi(x)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialAiryAiOut(out *Tensor, x *Tensor)(retVal *Tensor) {
retVal, err := SpecialAiryAiOut(out, x)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselJ0(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselJ0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselJ0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselJ0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselJ1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselJ1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselJ1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselJ1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselY0(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselY0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselY0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselY0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselY1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselY1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialBesselY1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialBesselY1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialT(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialT(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialTNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialTNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialTNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialTNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialTOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialTOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialTXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialTXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialTXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialTXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialU(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialU(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialUNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialUNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialUNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialUNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialUOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialUOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialUXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialUXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialUXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialUXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialV(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialV(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialVNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialVNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialVNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialVNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialVOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialVOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialVXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialVXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialVXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialVXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialW(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialW(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialWNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialWNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialWNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialWNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialWOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialWOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialWXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialWXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialChebyshevPolynomialWXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialChebyshevPolynomialWXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialDigamma(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialDigamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialDigammaOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialDigammaOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialEntr(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialEntr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialEntrOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialEntrOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErf(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfc(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfcOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfcOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfcx(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfcx(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfcxOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfcxOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfinv(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfinv(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfinvOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfinvOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExp2(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExp2(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExp2Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExp2Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpit(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpit(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpitOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpitOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpm1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpm1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpm1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpm1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammainc(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammainc(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammaincOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammaincOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammaincc(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammaincc(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammainccOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammainccOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammaln(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammaln(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammalnOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammalnOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialH(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialH(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHe(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHe(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHeNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHeNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHeNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHeNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHeOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHeOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHeXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHeXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialHermitePolynomialHeXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialHermitePolynomialHeXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0e(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0e(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0eOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0eOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1e(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1e(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1eOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1eOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLaguerrePolynomialL(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLaguerrePolynomialL(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLaguerrePolynomialLNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialLaguerrePolynomialLNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLaguerrePolynomialLNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialLaguerrePolynomialLNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLaguerrePolynomialLOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLaguerrePolynomialLOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLaguerrePolynomialLXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLaguerrePolynomialLXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLaguerrePolynomialLXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLaguerrePolynomialLXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLegendrePolynomialP(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLegendrePolynomialP(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLegendrePolynomialPNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialLegendrePolynomialPNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLegendrePolynomialPNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialLegendrePolynomialPNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLegendrePolynomialPOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLegendrePolynomialPOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLegendrePolynomialPXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLegendrePolynomialPXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialLegendrePolynomialPXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialLegendrePolynomialPXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLog1p(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLog1p(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLog1pOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLog1pOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogNdtr(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogNdtr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogNdtrOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogNdtrOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogSoftmax(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogit(eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogit(eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogitOut(out, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogsumexp(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogsumexpOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselI0(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselI0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselI0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselI0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselI1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselI1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselI1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselI1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselK0(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselK0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselK0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselK0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselK1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselK1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialModifiedBesselK1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialModifiedBesselK1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialMultigammaln(p int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialMultigammaln(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialMultigammalnOut(out *Tensor, p int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialMultigammalnOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtr(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtrOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtrOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtri(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtri(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtriOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtriOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPolygamma(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPolygamma(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPolygammaOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPsi(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPsi(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPsiOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPsiOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialRound(decimals int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialRound(decimals, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialRoundOut(out *Tensor, decimals int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialRoundOut(out, decimals, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialScaledModifiedBesselK0(x *Tensor)(retVal *Tensor) {
retVal, err := SpecialScaledModifiedBesselK0(x)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialScaledModifiedBesselK0Out(out *Tensor, x *Tensor)(retVal *Tensor) {
retVal, err := SpecialScaledModifiedBesselK0Out(out, x)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialScaledModifiedBesselK1(x *Tensor)(retVal *Tensor) {
retVal, err := SpecialScaledModifiedBesselK1(x)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialScaledModifiedBesselK1Out(out *Tensor, x *Tensor)(retVal *Tensor) {
retVal, err := SpecialScaledModifiedBesselK1Out(out, x)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialT(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialT(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialTNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialTNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialTNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialTNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialTOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialTOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialTXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialTXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialTXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialTXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialU(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialU(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialUNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialUNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialUNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialUNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialUOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialUOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialUXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialUXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialUXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialUXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialV(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialV(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialVNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialVNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialVNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialVNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialVOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialVOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialVXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialVXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialVXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialVXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialW(x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialW(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialWNScalar(x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialWNScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialWNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialWNScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialWOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialWOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialWXScalar(x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialWXScalar(x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialShiftedChebyshevPolynomialWXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retVal *Tensor) {
retVal, err := SpecialShiftedChebyshevPolynomialWXScalarOut(out, x, n)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialSinc(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialSinc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialSincOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialSincOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialSoftmax(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialSphericalBesselJ0(x *Tensor)(retVal *Tensor) {
retVal, err := SpecialSphericalBesselJ0(x)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialSphericalBesselJ0Out(out *Tensor, x *Tensor)(retVal *Tensor) {
retVal, err := SpecialSphericalBesselJ0Out(out, x)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1py(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1py(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1pyOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1pyOtherScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1pyOtherScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1pyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1pyOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlog1pySelfScalar(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlog1pySelfScalarOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogy(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogy(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogyOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogyOtherScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogyOtherScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogyOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlogySelfScalar(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlogySelfScalarOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZeta(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZeta(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZetaOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZetaOtherScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZetaOtherScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZetaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZetaOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialZetaSelfScalar(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialZetaSelfScalarOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
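// A minimal usage sketch of the Must* convention used throughout this file:
// each wrapper calls the error-returning variant and exits via log.Fatal on
// failure, so callers can chain operations without explicit error handling.
// (Illustrative only; the MustOfSlice constructor and MustDrop cleanup are
// assumed to live elsewhere in the package and are not defined in this file.)
//
//	x := MustOfSlice([]float64{4, 9, 16}) // assumed helper, not defined here
//	y := x.MustSqrt(false)                // wrapper from this file; log.Fatal on error
//	defer y.MustDrop()                    // assumed cleanup helper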
func(ts *Tensor) MustSqrt(del bool)(retVal *Tensor) {
retVal, err := ts.Sqrt(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqrt_()() {
err := ts.Sqrt_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSqrtOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SqrtOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSquare(del bool)(retVal *Tensor) {
retVal, err := ts.Square(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSquare_()() {
err := ts.Square_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSquareOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SquareOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueeze(del bool)(retVal *Tensor) {
retVal, err := ts.Squeeze(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueeze_()() {
err := ts.Squeeze_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSqueezeCopy(del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeCopyDim(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeCopyDim(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeCopyDimOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeCopyDimOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeCopyDims(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeCopyDims(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeCopyDimsOut(out *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeCopyDimsOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeDim(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeDim(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeDim_(dim int64)() {
err := ts.SqueezeDim_(dim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSqueezeDims(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeDims(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeDims_(dim []int64)() {
err := ts.SqueezeDims_(dim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSspaddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Sspaddmm(mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SspaddmmOut(out, mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustStack(tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := Stack(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustStackOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor) {
retVal, err := StackOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStd(unbiased bool, del bool)(retVal *Tensor) {
retVal, err := ts.Std(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdCorrection(dim []int64, correction *Scalar, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdCorrectionOut(out *Tensor, dim []int64, correction *Scalar, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdCorrectionOut(out, dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.StdMean(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustStdMeanCorrection(dim []int64, correction *Scalar, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.StdMeanCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustStdMeanCorrectionOut(out0 *Tensor, out1 *Tensor, dim []int64, correction *Scalar, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.StdMeanCorrectionOut(out0, out1, dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustStdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.StdMeanDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustStdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor) {
retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, returnComplex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStftCenter(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, padMode string, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor) {
retVal, err := ts.StftCenter(nFft, hopLength, winLength, window, center, padMode, normalized, onesided, returnComplex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSub(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Sub(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSub_(other *Tensor)() {
err := ts.Sub_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSubOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SubOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SubScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubScalar_(other *Scalar)() {
err := ts.SubScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSubScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SubScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubtract(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Subtract(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubtract_(other *Tensor)() {
err := ts.Subtract_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSubtractOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SubtractOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubtractScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SubtractScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubtractScalar_(other *Scalar)() {
err := ts.SubtractScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSum(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Sum(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SumDimIntlist(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SumIntlistOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSumOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SumOut(out, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSumToSize(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SumToSize(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSvd(some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.Svd(some, computeUv, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustSvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.SvdU(u, s, v, some, computeUv, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustSwapaxes(axis0 int64, axis1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Swapaxes(axis0, axis1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSwapaxes_(axis0 int64, axis1 int64)() {
err := ts.Swapaxes_(axis0, axis1)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSwapdims(dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Swapdims(dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSwapdims_(dim0 int64, dim1 int64)() {
err := ts.Swapdims_(dim0, dim1)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustT(del bool)(retVal *Tensor) {
retVal, err := ts.T(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustT_()() {
err := ts.T_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTCopy(del bool)(retVal *Tensor) {
retVal, err := ts.TCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTake(index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Take(index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTakeAlongDim(indices *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.TakeAlongDim(indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.TakeAlongDimOut(out, indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTakeOut(out *Tensor, index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TakeOut(out, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTan(del bool)(retVal *Tensor) {
retVal, err := ts.Tan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTan_()() {
err := ts.Tan_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTanOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TanOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTanh(del bool)(retVal *Tensor) {
retVal, err := ts.Tanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTanh_()() {
err := ts.Tanh_()
if err != nil { log.Fatal(err) }
return
}
func MustTanhBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := TanhBackward(gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := TanhBackwardGradInput(gradInput, gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) {
retVal, err := ts.TensordotOut(out, other, dimsSelf, dimsOther, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThreshold(threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Threshold(threshold, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThreshold_(threshold *Scalar, value *Scalar)() {
err := ts.Threshold_(threshold, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ThresholdBackward(gradOutput, threshold, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ThresholdBackwardGradInput(gradInput, gradOutput, threshold, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ThresholdOut(out, threshold, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTile(dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Tile(dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTo(device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.To(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDense(dtype gotch.DType, maskedGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToDense(dtype, maskedGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustToDenseBackward(grad *Tensor, input *Tensor, maskedGrad bool)(retVal *Tensor) {
retVal, err := ToDenseBackward(grad, input, maskedGrad)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToDevice(device, dtype, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToDtype(dtype, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToDtypeLayout(optionsKind, optionsDevice, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToMkldnn(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ToMkldnn(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustToMkldnnBackward(grad *Tensor, input *Tensor)(retVal *Tensor) {
retVal, err := ToMkldnnBackward(grad, input)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToMkldnnOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ToMkldnnOut(out, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToOther(other *Tensor, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToOther(other, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToPaddedTensor(padding float64, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToPaddedTensor(padding, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToPaddedTensorOut(out *Tensor, padding float64, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToPaddedTensorOut(out, padding, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparse(layout Layout, blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToSparse(layout, blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparseBsc(blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToSparseBsc(blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparseBsr(blocksize []int64, denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToSparseBsr(blocksize, denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparseCsc(denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToSparseCsc(denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparseCsr(denseDim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToSparseCsr(denseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparseSparseDim(sparseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToSparseSparseDim(sparseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTopk(k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Topk(k, dim, largest, sorted, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.TopkValues(values, indices, k, dim, largest, sorted, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTotype(scalarType gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Totype(scalarType, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrace(del bool)(retVal *Tensor) {
retVal, err := ts.Trace(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTraceBackward(grad *Tensor, sizes []int64)(retVal *Tensor) {
retVal, err := TraceBackward(grad, sizes)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTraceOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TraceOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Transpose(dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTranspose_(dim0 int64, dim1 int64)() {
err := ts.Transpose_(dim0, dim1)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTransposeCopy(dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.TransposeCopy(dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTransposeCopyIntOut(out *Tensor, dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.TransposeCopyIntOut(out, dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrapezoid(y *Tensor, dim int64)(retVal *Tensor) {
retVal, err := Trapezoid(y, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) {
retVal, err := TrapezoidX(y, x, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrapz(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) {
retVal, err := Trapz(y, x, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrapzDx(y *Tensor, dx float64, dim int64)(retVal *Tensor) {
retVal, err := TrapzDx(y, dx, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.TriangularSolve(a, upper, transpose, unitriangular, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.TriangularSolveX(x, m, a, upper, transpose, unitriangular, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTril(diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.Tril(diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTril_(diagonal int64)() {
err := ts.Tril_(diagonal)
if err != nil { log.Fatal(err) }
return
}
func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrilIndicesOut(out *Tensor, row int64, col int64, offset int64)(retVal *Tensor) {
retVal, err := TrilIndicesOut(out, row, col, offset)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrilOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.TrilOut(out, diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal *Tensor) {
retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriu(diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.Triu(diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriu_(diagonal int64)() {
err := ts.Triu_(diagonal)
if err != nil { log.Fatal(err) }
return
}
func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTriuIndicesOut(out *Tensor, row int64, col int64, offset int64)(retVal *Tensor) {
retVal, err := TriuIndicesOut(out, row, col, offset)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriuOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.TriuOut(out, diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivide(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TrueDivide(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivide_(other *Tensor)() {
err := ts.TrueDivide_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTrueDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TrueDivideOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivideScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.TrueDivideScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivideScalar_(other *Scalar)() {
err := ts.TrueDivideScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTrunc(del bool)(retVal *Tensor) {
retVal, err := ts.Trunc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrunc_()() {
err := ts.Trunc_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTruncOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TruncOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTypeAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TypeAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnflatten(dim int64, sizes []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Unflatten(dim, sizes, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.Unfold(dimension, size, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor) {
retVal, err := UnfoldBackward(gradIn, inputSizes, dim, size, step)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUnfoldBackwardOut(out *Tensor, gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor) {
retVal, err := UnfoldBackwardOut(out, gradIn, inputSizes, dim, size, step)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnfoldCopy(dimension int64, size int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.UnfoldCopy(dimension, size, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnfoldCopyOut(out *Tensor, dimension int64, size int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.UnfoldCopyOut(out, dimension, size, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUniform(from float64, to float64, del bool)(retVal *Tensor) {
retVal, err := ts.Uniform(from, to, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUniform_(from float64, to float64)() {
err := ts.Uniform_(from, to)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustUniformOut(out *Tensor, from float64, to float64, del bool)(retVal *Tensor) {
retVal, err := ts.UniformOut(out, from, to, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueConsecutive(returnInverse, returnCounts, dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUniqueConsecutiveOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, returnInverse bool, returnCounts bool, dim []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueConsecutiveOut(out0, out1, out2, returnInverse, returnCounts, dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueDim(dim, sorted, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueDimConsecutive(dim, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUniqueDimConsecutiveOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, dim int64, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueDimConsecutiveOut(out0, out1, out2, dim, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUniqueDimOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueDimOut(out0, out1, out2, dim, sorted, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUnsqueeze(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Unsqueeze(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnsqueeze_(dim int64)() {
err := ts.Unsqueeze_(dim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustUnsqueezeCopy(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.UnsqueezeCopy(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnsqueezeCopyOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.UnsqueezeCopyOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBicubic2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBicubic2dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor) {
retVal, err := UpsampleBicubic2dVec(input, outputSize, alignCorners, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBilinear2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBilinear2dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor) {
retVal, err := UpsampleBilinear2dVec(input, outputSize, alignCorners, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleLinear1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleLinear1dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor) {
retVal, err := UpsampleLinear1dVec(input, outputSize, alignCorners, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest1d(outputSize, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest1dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest1dVec(input, outputSize, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest2dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest2dVec(input, outputSize, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest3dVec(input *Tensor, outputSize []int64, scaleFactors []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest3dVec(input, outputSize, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleTrilinear3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleTrilinear3dVec(input *Tensor, outputSize []int64, alignCorners bool, scaleFactors []float64)(retVal *Tensor) {
retVal, err := UpsampleTrilinear3dVec(input, outputSize, alignCorners, scaleFactors)
if err != nil { log.Fatal(err) }
return retVal
}
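// exampleMustUpsampleNearest2d is a hand-written usage sketch (not generated) for
// the Upsample* wrappers above. It assumes gotch.Float and gotch.CPU exist as in
// upstream gotch; the tensor shape and target size are illustrative only.
func exampleMustUpsampleNearest2d() *Tensor {
	// A 4-D NCHW input: batch 1, 3 channels, 32x32 spatial size.
	img := MustZeros([]int64{1, 3, 32, 32}, gotch.Float, gotch.CPU)
	// Nearest-neighbour upsampling to 64x64; nil scale slices let the output size
	// determine the scale factors. del=false keeps `img` alive after the call.
	return img.MustUpsampleNearest2d([]int64{64, 64}, nil, nil, false)
}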
func MustValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool)(retVal *Tensor) {
retVal, err := ValueSelectingReductionBackward(grad, dim, indices, sizes, keepdim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustValues(del bool)(retVal *Tensor) {
retVal, err := ts.Values(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustValuesCopy(del bool)(retVal *Tensor) {
retVal, err := ts.ValuesCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustValuesCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ValuesCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustVander(x *Tensor, n []int64, increasing bool)(retVal *Tensor) {
retVal, err := Vander(x, n, increasing)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVar(unbiased bool, del bool)(retVal *Tensor) {
retVal, err := ts.Var(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarCorrection(dim []int64, correction *Scalar, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarCorrectionOut(out *Tensor, dim []int64, correction *Scalar, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarCorrectionOut(out, dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.VarMean(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustVarMeanCorrection(dim []int64, correction *Scalar, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.VarMeanCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustVarMeanCorrectionOut(out0 *Tensor, out1 *Tensor, dim []int64, correction *Scalar, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.VarMeanCorrectionOut(out0, out1, dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustVarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.VarMeanDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustVarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
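// exampleMustVarMean is a hand-written usage sketch (not generated) for the Var*
// wrappers above. MustVarMean returns the variance and the mean as two tensors;
// gotch.Float and gotch.CPU are assumed to exist as in upstream gotch.
func exampleMustVarMean() (*Tensor, *Tensor) {
	x := MustZeros([]int64{4, 5}, gotch.Float, gotch.CPU)
	// unbiased=true uses the Bessel-corrected (n-1) denominator; del=false keeps x.
	variance, mean := x.MustVarMean(true, false)
	return variance, mean
}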
func(ts *Tensor) MustVdot(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Vdot(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVdotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.VdotOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustView(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.View(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ViewAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsComplex(del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsComplex(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsComplexCopy(del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsComplexCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsComplexCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsComplexCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsReal(del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsReal(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsRealCopy(del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsRealCopy(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsRealCopyOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsRealCopyOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewCopy(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ViewCopy(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewCopyDtype(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ViewCopyDtype(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewCopyDtypeOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ViewCopyDtypeOut(out, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewCopyOut(out *Tensor, size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ViewCopyOut(out, size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewDtype(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ViewDtype(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
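// exampleMustView is a hand-written usage sketch (not generated) for the View*
// wrappers above. MustView reshapes without copying, so the element count must be
// preserved; gotch.Float and gotch.CPU are assumed to exist as in upstream gotch.
func exampleMustView() *Tensor {
	t := MustZeros([]int64{2, 3}, gotch.Float, gotch.CPU)
	// 2x3 has 6 elements, viewed here as a flat vector. del=false keeps t alive.
	return t.MustView([]int64{6}, false)
}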
func MustVstack(tensors []*Tensor)(retVal *Tensor) {
retVal, err := Vstack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustVstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor) {
retVal, err := VstackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
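// exampleMustVstack is a hand-written usage sketch (not generated) for MustVstack:
// it stacks tensors row-wise along the first dimension. gotch.Float and gotch.CPU
// are assumed to exist as in upstream gotch.
func exampleMustVstack() *Tensor {
	a := MustZeros([]int64{2, 4}, gotch.Float, gotch.CPU)
	b := MustZeros([]int64{3, 4}, gotch.Float, gotch.CPU)
	// The result has shape 5x4; all trailing dimensions must match.
	return MustVstack([]*Tensor{a, b})
}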
func MustWhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar)(retVal *Tensor) {
retVal, err := WhereScalar(condition, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustWhereScalarother(condition *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.WhereScalarother(condition, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustWhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := WhereScalarself(condition, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustWhereSelf(condition *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.WhereSelf(condition, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustWhereSelfOut(out *Tensor, condition *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.WhereSelfOut(out, condition, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogy(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Xlogy(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogy_(other *Tensor)() {
err := ts.Xlogy_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustXlogyOutscalarOther(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.XlogyOutscalarOther(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustXlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := XlogyOutscalarSelf(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogyOuttensor(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.XlogyOuttensor(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogyScalarOther(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.XlogyScalarOther(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogyScalarOther_(other *Scalar)() {
err := ts.XlogyScalarOther_(other)
if err != nil { log.Fatal(err) }
return
}
func MustXlogyScalarSelf(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := XlogyScalarSelf(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
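// exampleMustXlogy_ is a hand-written usage sketch (not generated) illustrating the
// in-place Must*_ methods: they return nothing and mutate the receiver, still
// terminating via log.Fatal on error. gotch.Float and gotch.CPU are assumed to
// exist as in upstream gotch.
func exampleMustXlogy_() *Tensor {
	x := MustZeros([]int64{3}, gotch.Float, gotch.CPU)
	y := MustZeros([]int64{3}, gotch.Float, gotch.CPU)
	// x <- x * log(y), element-wise in place on x (0*log(0) is defined as 0).
	x.MustXlogy_(y)
	return x
}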
func(ts *Tensor) MustZero(del bool)(retVal *Tensor) {
retVal, err := ts.Zero(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustZero_()() {
err := ts.Zero_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustZeroOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ZeroOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Zeros(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustZerosLike(del bool)(retVal *Tensor) {
retVal, err := ts.ZerosLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustZerosLikeOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ZerosLikeOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustZerosOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := ZerosOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
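// exampleMustZeros is a hand-written usage sketch (not generated) for the Zeros*
// wrappers: MustZeros allocates a zero-filled tensor, and MustZerosLike mirrors the
// shape and dtype of an existing one. gotch.Float and gotch.CPU are assumed to
// exist as in upstream gotch.
func exampleMustZeros() *Tensor {
	base := MustZeros([]int64{2, 3}, gotch.Float, gotch.CPU)
	// Same shape, dtype, and device as base; del=false keeps base alive.
	return base.MustZerosLike(false)
}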
// End of implementing Tensor =================================