// gotch/tensor/must-tensor-generated.go
package tensor

// NOTE: THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!

import (
	"log"

	"github.com/sugarme/gotch"
)
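
// The Must* wrappers below call the corresponding error-returning method and
// abort via log.Fatal if it fails, so callers can chain tensor operations
// without explicit error handling. For methods that take a `del bool`
// argument, passing true drops the receiver tensor after the call, which is
// how generated call chains avoid leaking the underlying C memory.
//
// Minimal usage sketch (an assumption-labelled example: it relies on a
// constructor such as MustOfSlice and the MustDrop method from the
// hand-written part of this package):
//
//	x := tensor.MustOfSlice([]float64{-1.0, 2.0, -3.0})
//	y := x.MustAbs(true) // log.Fatal on error; x is dropped because del=true
//	defer y.MustDrop()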
func(ts *Tensor) Must__And_(other *Scalar)() {
err := ts.__And_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__AndTensor_(other *Tensor)() {
err := ts.__AndTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Iand_(other *Scalar)() {
err := ts.__Iand_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IandTensor_(other *Tensor)() {
err := ts.__IandTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Ilshift_(other *Scalar)() {
err := ts.__Ilshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IlshiftTensor_(other *Tensor)() {
err := ts.__IlshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Ior_(other *Scalar)() {
err := ts.__Ior_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IorTensor_(other *Tensor)() {
err := ts.__IorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Irshift_(other *Scalar)() {
err := ts.__Irshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IrshiftTensor_(other *Tensor)() {
err := ts.__IrshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Ixor_(other *Scalar)() {
err := ts.__Ixor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__IxorTensor_(other *Tensor)() {
err := ts.__IxorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Lshift_(other *Scalar)() {
err := ts.__Lshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__LshiftTensor_(other *Tensor)() {
err := ts.__LshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Or_(other *Scalar)() {
err := ts.__Or_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__OrTensor_(other *Tensor)() {
err := ts.__OrTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Rshift_(other *Scalar)() {
err := ts.__Rshift_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__RshiftTensor_(other *Tensor)() {
err := ts.__RshiftTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__Xor_(other *Scalar)() {
err := ts.__Xor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must__XorTensor_(other *Tensor)() {
err := ts.__XorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool3d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AdaptiveAvgPool3dBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddBatchDim(batchDim int64, level int64, del bool)(retVal *Tensor) {
retVal, err := ts._AddBatchDim(batchDim, level, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddRelu(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AddRelu(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddRelu_(other *Tensor)() {
err := ts._AddRelu_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_AddReluOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._AddReluOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddReluScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts._AddReluScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_AddReluScalar_(other *Scalar)() {
err := ts._AddReluScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_Aminmax(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._Aminmax(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_AminmaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._AminmaxDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)() {
err := ts._AmpUpdateScale_(growthTracker, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor)() {
err := ts._BaddbmmMkl_(batch1, batch2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_CastByte(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastByte(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastChar(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastChar(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastDouble(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastDouble(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastFloat(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastFloat(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastHalf(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastHalf(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastInt(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastInt(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastLong(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastLong(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CastShort(nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CastShort(nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Cat(tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := _Cat(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := _CatOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor) {
retVal, err := _CdistBackward(grad, x1, x2, p, cdist)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CholeskySolveHelper(a *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts._CholeskySolveHelper(a, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Coalesce(del bool)(retVal *Tensor) {
retVal, err := ts._Coalesce(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Coalesced_(coalesced bool)() {
err := ts._Coalesced_(coalesced)
if err != nil { log.Fatal(err) }
return
}
func Must_ComputeLinearCombination(input *Tensor, coefficients *Tensor)(retVal *Tensor) {
retVal, err := _ComputeLinearCombination(input, coefficients)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor)(retVal *Tensor) {
retVal, err := _ComputeLinearCombinationOut(out, input, coefficients)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Conj(del bool)(retVal *Tensor) {
retVal, err := ts._Conj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConjPhysical(del bool)(retVal *Tensor) {
retVal, err := ts._ConjPhysical(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvDepthwise2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ConvDepthwise2d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvDepthwise2dBackward(gradInput *Tensor, gradWeight *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._ConvDepthwise2dBackward(gradInput, gradWeight, gradOutput, weight, kernelSize, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_ConvDepthwise2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ConvDepthwise2dOut(out, weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool)(retVal *Tensor) {
retVal, err := ts._ConvertIndicesFromCooToCsr(size, outInt32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt32 bool, del bool)(retVal *Tensor) {
retVal, err := ts._ConvertIndicesFromCooToCsrOut(out, size, outInt32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool)(retVal *Tensor) {
retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled, allowTf32)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvolutionDeprecated(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := _ConvolutionDeprecated(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := _ConvolutionMode(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64)(retVal *Tensor) {
retVal, err := _ConvolutionNogroup(input, weight, bias, stride, padding, dilation, transposed, outputPadding)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CopyFrom(dst *Tensor, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._CopyFrom(dst, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_CopyFromAndResize(dst *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._CopyFromAndResize(dst, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, deterministic bool, zeroInfinity bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _CudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank, deterministic, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CudnnRnn(input *Tensor, weight []Tensor, weightStride0 int64, weightBuf *Tensor, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, err := _CudnnRnn(input, weight, weightStride0, weightBuf, hx, cx, mode, hiddenSize, projSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4
}
func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, projSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal *Tensor) {
retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, batchFirst, bidirectional)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CufftGetPlanCacheMaxSize(deviceIndex int64)(retVal int64) {
retVal, err := _CufftGetPlanCacheMaxSize(deviceIndex)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_CufftGetPlanCacheSize(deviceIndex int64)(retVal int64) {
retVal, err := _CufftGetPlanCacheSize(deviceIndex)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_DebugHasInternalOverlap(del bool)(retVal int64) {
retVal, err := ts._DebugHasInternalOverlap(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_DetLuBasedHelper(del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._DetLuBasedHelper(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_DetLuBasedHelperBackwardHelper(detGrad *Tensor, det *Tensor, lu *Tensor, pivs *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._DetLuBasedHelperBackwardHelper(detGrad, det, lu, pivs, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_DimArange(like *Tensor, dim int64)(retVal *Tensor) {
retVal, err := _DimArange(like, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Dimi(del bool)(retVal int64) {
retVal, err := ts._Dimi(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Dimv(del bool)(retVal int64) {
retVal, err := ts._Dimv(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor)(retVal *Tensor) {
retVal, err := _DirichletGrad(x, alpha, total)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagDenseBackward(grad, indices, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagForwardOnly(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := _EmbeddingBagForwardOnly(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func Must_EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor, paddingIdx int64)(retVal *Tensor) {
retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal *Tensor) {
retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_EuclideanDist(x1 *Tensor, x2 *Tensor)(retVal *Tensor) {
retVal, err := _EuclideanDist(x1, x2)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) {
retVal, err := ts._FakeQuantizeLearnablePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal *Tensor) {
retVal, err := ts._FakeQuantizeLearnablePerTensorAffine(scale, zeroPoint, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, gradFactor float64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._FakeQuantizeLearnablePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, gradFactor, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tensor, zeroPoint *Tensor, fakeQuantEnabled *Tensor, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FakeQuantizePerTensorAffineCachemaskTensorQparams(scale, zeroPoint, fakeQuantEnabled, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FftC2c(dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2c(dim, normalization, forward, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftC2cOut(out *Tensor, dim []int64, normalization int64, forward bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2cOut(out, dim, normalization, forward, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftC2r(dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2r(dim, normalization, lastDimSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftC2rOut(out *Tensor, dim []int64, normalization int64, lastDimSize int64, del bool)(retVal *Tensor) {
retVal, err := ts._FftC2rOut(out, dim, normalization, lastDimSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftR2c(dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftR2c(dim, normalization, onesided, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FftR2cOut(out *Tensor, dim []int64, normalization int64, onesided bool, del bool)(retVal *Tensor) {
retVal, err := ts._FftR2cOut(out, dim, normalization, onesided, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_FusedDropout(p float64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FusedDropout(p, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FusedMovingAvgObsFqHelper(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._FusedMovingAvgObsFqHelper(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_FwPrimal(level int64, del bool)(retVal *Tensor) {
retVal, err := ts._FwPrimal(level, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._GatherSparseBackward(dim, index, grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := _GridSampler2dCpuFallback(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _GridSampler2dCpuFallbackBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bool) {
retVal, err := ts._HasCompatibleShallowCopyType(from, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_IndexCopy_(dim int64, index *Tensor, source *Tensor)() {
err := ts._IndexCopy_(dim, index, source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_Indices(del bool)(retVal *Tensor) {
retVal, err := ts._Indices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_InverseHelper(del bool)(retVal *Tensor) {
retVal, err := ts._InverseHelper(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LinalgInvOutHelper_(infosLu *Tensor, infosGetri *Tensor)() {
err := ts._LinalgInvOutHelper_(infosLu, infosGetri)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_LinalgQrHelper(mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._LinalgQrHelper(mode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._LogSoftmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._LogSoftmaxBackwardData(gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._LogSoftmaxBackwardDataOut(out, gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._LogSoftmaxOut(out, dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Logcumsumexp(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._Logcumsumexp(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._LogcumsumexpOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_LuWithInfo(pivot bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._LuWithInfo(pivot, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_MakeDual(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor) {
retVal, err := _MakeDual(primal, tangent, level)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor) {
retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal *Tensor) {
retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MaskedScale(mask *Tensor, scale float64, del bool)(retVal *Tensor) {
retVal, err := ts._MaskedScale(mask, scale, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MkldnnReshape(shape []int64, del bool)(retVal *Tensor) {
retVal, err := ts._MkldnnReshape(shape, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts._MkldnnTranspose(dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64)() {
err := ts._MkldnnTranspose_(dim0, dim1)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_NegView(del bool)(retVal *Tensor) {
retVal, err := ts._NegView(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NnpackAvailable()(retVal bool) {
retVal, err := _NnpackAvailable()
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor) {
retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NnpackSpatialConvolutionBackwardInput(input *Tensor, gradOutput *Tensor, weight *Tensor, padding []int64)(retVal *Tensor) {
retVal, err := _NnpackSpatialConvolutionBackwardInput(input, gradOutput, weight, padding)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize []int64, gradOutput *Tensor, padding []int64)(retVal *Tensor) {
retVal, err := _NnpackSpatialConvolutionBackwardWeight(input, weightsize, gradOutput, padding)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Nnz(del bool)(retVal int64) {
retVal, err := ts._Nnz(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _PackPaddedSequence(input, lengths, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool)(retVal *Tensor) {
retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddingValue *Scalar, totalLength int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _PadPackedSequence(data, batchSizes, batchFirst, paddingValue, totalLength)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._PdistBackward(grad, p, pdist, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_PinMemory(device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts._PinMemory(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool)(retVal *Tensor) {
retVal, err := ts._RemoveBatchDim(level, batchSize, outDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ReshapeAlias(size []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts._ReshapeAlias(size, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ReshapeFromTensor(shape *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._ReshapeFromTensor(shape, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _RowwisePrune(weight, mask, compressedIndicesDtype)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_SWhere(condition *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SWhere(condition, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SampleDirichlet(del bool)(retVal *Tensor) {
retVal, err := ts._SampleDirichlet(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SaturateWeightToFp16(weight *Tensor)(retVal *Tensor) {
retVal, err := _SaturateWeightToFp16(weight)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce string, lengths *Tensor, axis int64)(retVal *Tensor) {
retVal, err := _SegmentReduceBackward(grad, output, data, reduce, lengths, axis)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ShapeAsTensor(del bool)(retVal *Tensor) {
retVal, err := ts._ShapeAsTensor(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension int64, numGenerated int64, dtype gotch.DType)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _SobolEngineDraw(quasi, n, sobolstate, dimension, numGenerated, dtype)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64)() {
err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_SobolEngineInitializeState_(dimension int64)() {
err := ts._SobolEngineInitializeState_(dimension)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_SobolEngineScramble_(ltm *Tensor, dimension int64)() {
err := ts._SobolEngineScramble_(ltm, dimension)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._Softmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SoftmaxBackwardData(gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SoftmaxBackwardDataOut(gradInput, gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SoftmaxOut(out, dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SolveHelper(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._SolveHelper(a, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_SparseAddmm(sparse *Tensor, dense *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SparseAddmm(sparse, dense, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := _SparseCsrTensorUnsafe(crowIndices, colIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmaxBackwardData(gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseLogSoftmaxInt(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseMaskHelper(t *Tensor, maskIndices *Tensor)(retVal *Tensor) {
retVal, err := _SparseMaskHelper(t, maskIndices)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_SparseMm(sparse *Tensor, dense *Tensor)(retVal *Tensor) {
retVal, err := _SparseMm(sparse, dense)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmax(dim, halfToFloat, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmaxBackwardData(gradOutput, output, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSoftmaxInt(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSparseMatmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSparseMatmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSum(del bool)(retVal *Tensor) {
retVal, err := ts._SparseSum(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumBackward(grad *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumBackward(grad, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumDim(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumDim(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumDimDtype(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SparseSumDtype(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts._SparseSumDtype(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Stack(tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := _Stack(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_StackOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := _StackOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_StandardGamma(del bool)(retVal *Tensor) {
retVal, err := ts._StandardGamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_StandardGammaGrad(output *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._StandardGammaGrad(output, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_SvdHelper(some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._SvdHelper(some, computeUv, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) Must_SymeigHelper(eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._SymeigHelper(eigenvectors, upper, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_TestAmbiguousDefaults(dummy *Tensor, a int64, b int64)(retVal *Tensor) {
retVal, err := _TestAmbiguousDefaults(dummy, a, b)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string)(retVal *Tensor) {
retVal, err := _TestAmbiguousDefaultsB(dummy, a, b)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalFilledIntlist(values *Tensor, addends []int64)(retVal *Tensor) {
retVal, err := _TestOptionalFilledIntlist(values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestOptionalIntlist(values *Tensor, addends []int64)(retVal *Tensor) {
retVal, err := _TestOptionalIntlist(values, addends)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_TestSerializationSubcmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts._TestSerializationSubcmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_TestStringDefault(dummy *Tensor, a string, b string)(retVal *Tensor) {
retVal, err := _TestStringDefault(dummy, a, b)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, del bool)(retVal *Tensor) {
retVal, err := ts._ToCopy(optionsKind, optionsDevice, nonBlocking, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor) {
retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Unique(sorted bool, returnInverse bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts._Unique(sorted, returnInverse, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_Unique2(sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts._Unique2(sorted, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func Must_UnpackDual(dual *Tensor, level int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _UnpackDual(dual, level)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) Must_UnsafeView(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts._UnsafeView(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64)(retVal bool) {
retVal, err := _UseCudnnCtcLoss(logProbs, targets, inputLengths, targetLengths, blank)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_UseCudnnRnnFlattenWeight()(retVal bool) {
retVal, err := _UseCudnnRnnFlattenWeight()
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Values(del bool)(retVal *Tensor) {
retVal, err := ts._Values(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) Must_Version(del bool)(retVal int64) {
retVal, err := ts._Version(del)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_WeightNorm(v *Tensor, g *Tensor, dim int64)(retVal *Tensor) {
retVal, err := _WeightNorm(v, g, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func Must_WeightNormCudaInterface(v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormCudaInterface(v, g, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_WeightNormCudaInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormCudaInterfaceBackward(gradW, savedV, savedG, savedNorms, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func Must_WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, savedNorms *Tensor, dim int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := _WeightNormDifferentiableBackward(gradW, savedV, savedG, savedNorms, dim)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
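// The wrappers above cover ATen's underscore-prefixed internal operators
// (e.g. Must_Softmax for _softmax); the wrappers below cover the public
// operators (MustAbs, MustAdd, ...), following the same error-to-log.Fatal
// pattern.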
func(ts *Tensor) MustAbs(del bool)(retVal *Tensor) {
retVal, err := ts.Abs(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAbs_()() {
err := ts.Abs_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAbsOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AbsOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAbsolute(del bool)(retVal *Tensor) {
retVal, err := ts.Absolute(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAbsolute_()() {
err := ts.Absolute_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAbsoluteOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AbsoluteOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcos(del bool)(retVal *Tensor) {
retVal, err := ts.Acos(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcos_()() {
err := ts.Acos_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAcosOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AcosOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcosh(del bool)(retVal *Tensor) {
retVal, err := ts.Acosh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAcosh_()() {
err := ts.Acosh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAcoshOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AcoshOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool1d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool3d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool3dBackward(gradInput, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool1d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool1d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool2d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool2dBackwardGradInput(gradInput, gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool2dOut(out, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool3d(outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool3d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AdaptiveMaxPool3dBackwardGradInput(gradInput, gradOutput, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AdaptiveMaxPool3dOut(out, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAdd(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Add(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAdd_(other *Tensor)() {
err := ts.Add_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.AddScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddScalar_(other *Scalar)() {
err := ts.AddScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addbmm(batch1, batch2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddbmm_(batch1 *Tensor, batch2 *Tensor)() {
err := ts.Addbmm_(batch1, batch2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddbmmOut(out, batch1, batch2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addcdiv(tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcdiv_(tensor1 *Tensor, tensor2 *Tensor)() {
err := ts.Addcdiv_(tensor1, tensor2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcmul(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addcmul(tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddcmul_(tensor1 *Tensor, tensor2 *Tensor)() {
err := ts.Addcmul_(tensor1, tensor2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addmm(mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmm_(mat1 *Tensor, mat2 *Tensor)() {
err := ts.Addmm_(mat1, mat2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddmmOut(out, mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmv(mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addmv(mat, vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddmv_(mat *Tensor, vec *Tensor)() {
err := ts.Addmv_(mat, vec)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddmvOut(out, mat, vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddr(vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Addr(vec1, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAddr_(vec1 *Tensor, vec2 *Tensor)() {
err := ts.Addr_(vec1, vec2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AddrOut(out, vec1, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustAffineGridGenerator(theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor) {
retVal, err := AffineGridGenerator(theta, size, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustAffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool)(retVal *Tensor) {
retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAlias(del bool)(retVal *Tensor) {
retVal, err := ts.Alias(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAlignAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AlignAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAll(del bool)(retVal *Tensor) {
retVal, err := ts.All(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllAllOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AllAllOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllDim(dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AllDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AllOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAllclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal bool) {
retVal, err := ts.Allclose(other, rtol, atol, equalNan, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := AlphaDropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAlphaDropout_(p float64, train bool)() {
err := ts.AlphaDropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Amax(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AmaxOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Amin(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AminOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAminmax(dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Aminmax(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAminmaxOut(min *Tensor, max *Tensor, dim []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.AminmaxOut(min, max, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustAngle(del bool)(retVal *Tensor) {
retVal, err := ts.Angle(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAngleOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AngleOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAny(del bool)(retVal *Tensor) {
retVal, err := ts.Any(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAnyAllOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AnyAllOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAnyDim(dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AnyDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAnyOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.AnyOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Arange(end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArangeOut(out *Tensor, end *Scalar)(retVal *Tensor) {
retVal, err := ArangeOut(out, end)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := ArangeStart(start, end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArangeStartOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) {
retVal, err := ArangeStartOut(out, start, end)
if err != nil { log.Fatal(err) }
return retVal
}
func MustArangeStartStep(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := ArangeStartStep(start, end, step, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccos(del bool)(retVal *Tensor) {
retVal, err := ts.Arccos(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccos_()() {
err := ts.Arccos_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArccosOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArccosOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccosh(del bool)(retVal *Tensor) {
retVal, err := ts.Arccosh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArccosh_()() {
err := ts.Arccosh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArccoshOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArccoshOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsin(del bool)(retVal *Tensor) {
retVal, err := ts.Arcsin(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsin_()() {
err := ts.Arcsin_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArcsinOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArcsinOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsinh(del bool)(retVal *Tensor) {
retVal, err := ts.Arcsinh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArcsinh_()() {
err := ts.Arcsinh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArcsinhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArcsinhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctan(del bool)(retVal *Tensor) {
retVal, err := ts.Arctan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctan_()() {
err := ts.Arctan_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArctanOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArctanOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctanh(del bool)(retVal *Tensor) {
retVal, err := ts.Arctanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArctanh_()() {
err := ts.Arctanh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustArctanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ArctanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Argmax(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.ArgmaxOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Argmin(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.ArgminOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustArgsort(dim int64, descending bool, del bool)(retVal *Tensor) {
retVal, err := ts.Argsort(dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsStrided(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AsStrided(size, stride, storageOffset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset []int64)() {
err := ts.AsStrided_(size, stride, storageOffset)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAsin(del bool)(retVal *Tensor) {
retVal, err := ts.Asin(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsin_()() {
err := ts.Asin_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAsinOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AsinOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsinh(del bool)(retVal *Tensor) {
retVal, err := ts.Asinh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAsinh_()() {
err := ts.Asinh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAsinhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AsinhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan(del bool)(retVal *Tensor) {
retVal, err := ts.Atan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan2(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Atan2(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan2_(other *Tensor)() {
err := ts.Atan2_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAtan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Atan2Out(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtan_()() {
err := ts.Atan_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAtanOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AtanOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtanh(del bool)(retVal *Tensor) {
retVal, err := ts.Atanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtanh_()() {
err := ts.Atanh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustAtanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.AtanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast1d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast1d(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast2d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast2d(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAtleast3d(del bool)(retVal *Tensor) {
retVal, err := ts.Atleast3d(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustAvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBaddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Baddbmm(batch1, batch2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBaddbmm_(batch1 *Tensor, batch2 *Tensor)() {
err := ts.Baddbmm_(batch1, batch2)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BaddbmmOut(out, batch1, batch2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BartlettWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor, count *Tensor)(retVal *Tensor) {
retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu, count)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormBackwardReduce(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, inputG bool, weightG bool, biasG bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := BatchNormBackwardReduce(gradOut, input, mean, invstd, weight, inputG, weightG, biasG)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustBatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) {
retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) {
retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, count int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormGatherStats(input, mean, invstd, runningMean, runningVar, momentum, eps, count)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64, eps float64, counts *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormGatherStatsWithCounts(input, mean, invstd, runningMean, runningVar, momentum, eps, counts)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormStats(input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormStats(input, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustBatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor, momentum float64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := BatchNormUpdateStats(input, runningMean, runningVar, momentum)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustBernoulli(del bool)(retVal *Tensor) {
retVal, err := ts.Bernoulli(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBernoulli_(p *Tensor)() {
err := ts.Bernoulli_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBernoulliFloat_(p float64)() {
err := ts.BernoulliFloat_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBernoulliOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BernoulliOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBernoulliP(p float64, del bool)(retVal *Tensor) {
retVal, err := ts.BernoulliP(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := Bilinear(input1, input2, weight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyBackwardGradInput(gradInput, gradOutput, target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBincount(weights *Tensor, minlength int64, del bool)(retVal *Tensor) {
retVal, err := ts.Bincount(weights, minlength, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBinomial(count *Tensor, prob *Tensor)(retVal *Tensor) {
retVal, err := Binomial(count, prob)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAnd(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAnd(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAnd_(other *Scalar)() {
err := ts.BitwiseAnd_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseAndScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAndScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAndTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAndTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseAndTensor_(other *Tensor)() {
err := ts.BitwiseAndTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseAndTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseAndTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShift(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShift(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShift_(other *Tensor)() {
err := ts.BitwiseLeftShift_(other)
if err != nil { log.Fatal(err) }
return
}
func MustBitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseLeftShiftScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShiftTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShiftTensorScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseLeftShiftTensorScalar_(other *Scalar)() {
err := ts.BitwiseLeftShiftTensorScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseLeftShiftTensorScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseNot(del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseNot(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseNot_()() {
err := ts.BitwiseNot_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseNotOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseNotOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOr(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOr(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOr_(other *Scalar)() {
err := ts.BitwiseOr_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseOrScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOrScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOrTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOrTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseOrTensor_(other *Tensor)() {
err := ts.BitwiseOrTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseOrTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseOrTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShift(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShift(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShift_(other *Tensor)() {
err := ts.BitwiseRightShift_(other)
if err != nil { log.Fatal(err) }
return
}
func MustBitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := BitwiseRightShiftScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShiftTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShiftTensorScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShiftTensorScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseRightShiftTensorScalar_(other *Scalar)() {
err := ts.BitwiseRightShiftTensorScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseRightShiftTensorScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXor(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXor_(other *Scalar)() {
err := ts.BitwiseXor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseXorScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXorScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXorTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXorTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBitwiseXorTensor_(other *Tensor)() {
err := ts.BitwiseXorTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustBitwiseXorTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BitwiseXorTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := BlackmanWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBlockDiag(tensors []Tensor)(retVal *Tensor) {
retVal, err := BlockDiag(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBmm(mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Bmm(mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBmmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.BmmOut(out, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBroadcastTo(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.BroadcastTo(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBucketize(boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
retVal, err := ts.Bucketize(boundaries, outInt32, right, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustBucketizeScalar(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor) {
retVal, err := BucketizeScalar(selfScalar, boundaries, outInt32, right)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustBucketizeTensorOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
retVal, err := ts.BucketizeTensorOut(out, boundaries, outInt32, right, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCanCast(from gotch.DType, to gotch.DType)(retVal bool) {
retVal, err := CanCast(from, to)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCartesianProd(tensors []Tensor)(retVal *Tensor) {
retVal, err := CartesianProd(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCat(tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := Cat(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := CatOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCauchy_(median float64, sigma float64)() {
err := ts.Cauchy_(median, sigma)
if err != nil { log.Fatal(err) }
return
}
func MustCdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64)(retVal *Tensor) {
retVal, err := Cdist(x1, x2, p, computeMode)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCeil(del bool)(retVal *Tensor) {
retVal, err := ts.Ceil(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCeil_()() {
err := ts.Ceil_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCeilOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CeilOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCelu(del bool)(retVal *Tensor) {
retVal, err := ts.Celu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCelu_()() {
err := ts.Celu_()
if err != nil { log.Fatal(err) }
return
}
func MustChainMatmul(matrices []Tensor)(retVal *Tensor) {
retVal, err := ChainMatmul(matrices)
if err != nil { log.Fatal(err) }
return retVal
}
func MustChainMatmulOut(out *Tensor, matrices []Tensor)(retVal *Tensor) {
retVal, err := ChainMatmulOut(out, matrices)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustChannelShuffle(groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.ChannelShuffle(groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholesky(upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.Cholesky(upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskyInverse(upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskyInverse(upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskyInverseOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskyInverseOut(out, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskyOut(out, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskySolve(input2 *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskySolve(input2, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.CholeskySolveOut(out, input2, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float64, bitWidth int64)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ChooseQparamsOptimized(input, numel, nBins, ratio, bitWidth)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustClamp(min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Clamp(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClamp_(min *Scalar, max *Scalar)() {
err := ts.Clamp_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMax(max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMax(max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMax_(max *Scalar)() {
err := ts.ClampMax_(max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMaxOut(out *Tensor, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMaxOut(out, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMaxTensor(max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMaxTensor(max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMaxTensor_(max *Tensor)() {
err := ts.ClampMaxTensor_(max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMaxTensorOut(out *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMaxTensorOut(out, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMin(min *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMin(min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMin_(min *Scalar)() {
err := ts.ClampMin_(min)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMinOut(out *Tensor, min *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMinOut(out, min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMinTensor(min *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMinTensor(min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampMinTensor_(min *Tensor)() {
err := ts.ClampMinTensor_(min)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampMinTensorOut(out *Tensor, min *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampMinTensorOut(out, min, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClampOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampTensor(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClampTensor_(min *Tensor, max *Tensor)() {
err := ts.ClampTensor_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClampTensorOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClip(min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Clip(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClip_(min *Scalar, max *Scalar)() {
err := ts.Clip_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClipOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ClipOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClipTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClipTensor(min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustClipTensor_(min *Tensor, max *Tensor)() {
err := ts.ClipTensor_(min, max)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ClipTensorOut(out, min, max, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCoalesce(del bool)(retVal *Tensor) {
retVal, err := ts.Coalesce(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCol2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) {
retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCol2imBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) {
retVal, err := Col2imBackwardGradInput(gradInput, gradOutput, kernelSize, dilation, padding, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCol2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustColIndices(del bool)(retVal *Tensor) {
retVal, err := ts.ColIndices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustColumnStack(tensors []Tensor)(retVal *Tensor) {
retVal, err := ColumnStack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustColumnStackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
retVal, err := ColumnStackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCombinations(r int64, withReplacement bool, del bool)(retVal *Tensor) {
retVal, err := ts.Combinations(r, withReplacement, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustComplex(real *Tensor, imag *Tensor)(retVal *Tensor) {
retVal, err := Complex(real, imag)
if err != nil { log.Fatal(err) }
return retVal
}
func MustComplexOut(out *Tensor, real *Tensor, imag *Tensor)(retVal *Tensor) {
retVal, err := ComplexOut(out, real, imag)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConcat(tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := Concat(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConcatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := ConcatOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConj(del bool)(retVal *Tensor) {
retVal, err := ts.Conj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConjPhysical(del bool)(retVal *Tensor) {
retVal, err := ts.ConjPhysical(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConjPhysical_()() {
err := ts.ConjPhysical_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ConjPhysicalOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConstantPadNd(pad []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConstantPadNd(pad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustContiguous(del bool)(retVal *Tensor) {
retVal, err := ts.Contiguous(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv1dPadding(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv2dPadding(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding string, dilation []int64, groups int64)(retVal *Tensor) {
retVal, err := Conv3dPadding(input, weight, bias, stride, padding, dilation, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConvDepthwise3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConvDepthwise3d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConvDepthwise3dBackward(gradInput *Tensor, gradWeight *Tensor, gradBias *Tensor, gradOutput *Tensor, weight *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.ConvDepthwise3dBackward(gradInput, gradWeight, gradBias, gradOutput, weight, kernelSize, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor) {
retVal, err := ts.ConvTbc(weight, bias, pad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pad int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.ConvTbcBackward(input, weight, bias, pad, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) {
retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func MustConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) {
retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopySparseToSparse_(src *Tensor, nonBlocking bool)() {
err := ts.CopySparseToSparse_(src, nonBlocking)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCopysign(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Copysign(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopysign_(other *Tensor)() {
err := ts.Copysign_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCopysignOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CopysignOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopysignScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.CopysignScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCopysignScalar_(other *Scalar)() {
err := ts.CopysignScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCopysignScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.CopysignScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCorrcoef(del bool)(retVal *Tensor) {
retVal, err := ts.Corrcoef(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCos(del bool)(retVal *Tensor) {
retVal, err := ts.Cos(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCos_()() {
err := ts.Cos_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCosOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CosOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCosh(del bool)(retVal *Tensor) {
retVal, err := ts.Cosh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCosh_()() {
err := ts.Cosh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCoshOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CoshOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) {
retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64)(retVal *Tensor) {
retVal, err := CosineSimilarity(x1, x2, dim, eps)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCountNonzero(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CountNonzero(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCountNonzeroDimIntlist(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CountNonzeroDimIntlist(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCov(correction int64, fweights *Tensor, aweights *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Cov(correction, fweights, aweights, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCross(other *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Cross(other, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrossEntropyLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, labelSmoothing float64, del bool)(retVal *Tensor) {
retVal, err := ts.CrossEntropyLoss(target, weight, reduction, ignoreIndex, labelSmoothing, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrossOut(out *Tensor, other *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.CrossOut(out, other, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCrowIndices(del bool)(retVal *Tensor) {
retVal, err := ts.CrowIndices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) {
retVal, err := CtcLossTensor(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) {
retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := CudnnBatchNorm(input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustCudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64, reserveSpace *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := CudnnBatchNormBackward(input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon, reserveSpace)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustCudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionAddRelu(weight *Tensor, z *Tensor, alpha *Scalar, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionAddRelu(weight, z, alpha, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool)(retVal *Tensor) {
retVal, err := CudnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionDeprecated(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionDeprecated(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionDeprecated2(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionDeprecated2(weight, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionRelu(weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionRelu(weight, bias, stride, padding, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool)(retVal *Tensor) {
retVal, err := CudnnConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionTransposeDeprecated(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionTransposeDeprecated(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnConvolutionTransposeDeprecated2(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnConvolutionTransposeDeprecated2(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.CudnnGridSampler(grid, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.CudnnGridSamplerBackward(grid, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCudnnIsAcceptable(del bool)(retVal bool) {
retVal, err := ts.CudnnIsAcceptable(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCummax(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Cummax(dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCummaxOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.CummaxOut(values, indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustCummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64)(retVal *Tensor) {
retVal, err := CummaxminBackward(grad, input, indices, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCummin(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Cummin(dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCumminOut(values *Tensor, indices *Tensor, dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.CumminOut(values, indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Cumprod(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumprod_(dim int64, dtype gotch.DType)() {
err := ts.Cumprod_(dim, dtype)
if err != nil { log.Fatal(err) }
return
}
func MustCumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor)(retVal *Tensor) {
retVal, err := CumprodBackward(grad, input, dim, output)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.CumprodOut(out, dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Cumsum(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustCumsum_(dim int64, dtype gotch.DType)() {
err := ts.Cumsum_(dim, dtype)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustCumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.CumsumOut(out, dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCumulativeTrapezoid(y *Tensor, dim int64)(retVal *Tensor) {
retVal, err := CumulativeTrapezoid(y, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustCumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) {
retVal, err := CumulativeTrapezoidX(y, x, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustData(del bool)(retVal *Tensor) {
retVal, err := ts.Data(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDeg2rad(del bool)(retVal *Tensor) {
retVal, err := ts.Deg2rad(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDeg2rad_()() {
err := ts.Deg2rad_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDeg2radOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Deg2radOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDenseDim(del bool)(retVal int64) {
retVal, err := ts.DenseDim(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDequantize(del bool)(retVal *Tensor) {
retVal, err := ts.Dequantize(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDet(del bool)(retVal *Tensor) {
retVal, err := ts.Det(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDetach(del bool)(retVal *Tensor) {
retVal, err := ts.Detach(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDetach_()() {
err := ts.Detach_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDiag(diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.Diag(diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDiagBackward(grad *Tensor, inputSizes []int64, diagonal int64)(retVal *Tensor) {
retVal, err := DiagBackward(grad, inputSizes, diagonal)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagEmbed(offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.DiagOut(out, diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagflat(offset int64, del bool)(retVal *Tensor) {
retVal, err := ts.Diagflat(offset, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Diagonal(offset, dim1, dim2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor) {
retVal, err := DiagonalBackward(gradOutput, inputSizes, offset, dim1, dim2)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiff(n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Diff(n, dim, prepend, append, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, append *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DiffOut(out, n, dim, prepend, append, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDigamma(del bool)(retVal *Tensor) {
retVal, err := ts.Digamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDigamma_()() {
err := ts.Digamma_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDigammaOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DigammaOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDist(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Dist(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiv(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Div(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDiv_(other *Tensor)() {
err := ts.Div_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DivOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivOutMode(out, other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.DivScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivScalar_(other *Scalar)() {
err := ts.DivScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivScalarMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivScalarMode_(other *Scalar, roundingMode string)() {
err := ts.DivScalarMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivTensorMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivTensorMode_(other *Tensor, roundingMode string)() {
err := ts.DivTensorMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivide(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Divide(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivide_(other *Tensor)() {
err := ts.Divide_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DivideOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideOutMode(out *Tensor, other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivideOutMode(out, other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.DivideScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideScalar_(other *Scalar)() {
err := ts.DivideScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivideScalarMode(other *Scalar, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivideScalarMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideScalarMode_(other *Scalar, roundingMode string)() {
err := ts.DivideScalarMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDivideTensorMode(other *Tensor, roundingMode string, del bool)(retVal *Tensor) {
retVal, err := ts.DivideTensorMode(other, roundingMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDivideTensorMode_(other *Tensor, roundingMode string)() {
err := ts.DivideTensorMode_(other, roundingMode)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustDot(tensor *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Dot(tensor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDotOut(out *Tensor, tensor *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.DotOut(out, tensor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := Dropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustDropout_(p float64, train bool)() {
err := ts.Dropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func MustDstack(tensors []Tensor)(retVal *Tensor) {
retVal, err := Dstack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustDstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
retVal, err := DstackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
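// exampleMustDstackUsage is a hand-written sketch, not part of the generated
// bindings: it shows how the Must* wrappers in this file are typically called.
// They forward to the error-returning variants and log.Fatal on failure, so
// the call site needs no explicit error handling. gotch.Float and gotch.CPU
// are assumed to be the usual DType/Device values exported by the gotch
// package.
func exampleMustDstackUsage() *Tensor {
	// Two uninitialized 2x3 tensors allocated on the CPU.
	a := MustEmpty([]int64{2, 3}, gotch.Float, gotch.CPU)
	b := MustEmpty([]int64{2, 3}, gotch.Float, gotch.CPU)
	// MustDstack takes a value slice of Tensor, hence the dereferences; the
	// error-returning counterpart is Dstack([]Tensor{*a, *b}) plus an err check.
	return MustDstack([]Tensor{*a, *b})
}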
func(ts *Tensor) MustEig(eigenvectors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Eig(eigenvectors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustEigE(e *Tensor, v *Tensor, eigenvectors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.EigE(e, v, eigenvectors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustEinsum(equation string, tensors []Tensor)(retVal *Tensor) {
retVal, err := Einsum(equation, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustElu(del bool)(retVal *Tensor) {
retVal, err := ts.Elu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustElu_()() {
err := ts.Elu_()
if err != nil { log.Fatal(err) }
return
}
func MustEluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor) {
retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, isResult, selfOrResult)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, isResult bool, selfOrResult *Tensor)(retVal *Tensor) {
retVal, err := EluBackwardGradInput(gradInput, gradOutput, alpha, scale, inputScale, isResult, selfOrResult)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.EluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) {
retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) {
retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmbeddingBag(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := EmbeddingBag(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustEmbeddingBagPaddingIdx(weight *Tensor, indices *Tensor, offsets *Tensor, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor, includeLastOffset bool, paddingIdx []int64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := EmbeddingBagPaddingIdx(weight, indices, offsets, scaleGradByFreq, mode, sparse, perSampleWeights, includeLastOffset, paddingIdx)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func MustEmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) {
retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64)() {
err := ts.EmbeddingRenorm_(indices, maxNorm, normType)
if err != nil { log.Fatal(err) }
return
}
func MustEmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) {
retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Empty(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEmptyLike(del bool)(retVal *Tensor) {
retVal, err := ts.EmptyLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := EmptyOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := EmptyQuantized(size, qtensor, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEq(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Eq(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEq_(other *Scalar)() {
err := ts.Eq_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustEqScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.EqScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEqTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.EqTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEqTensor_(other *Tensor)() {
err := ts.EqTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustEqTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.EqTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustEqual(other *Tensor, del bool)(retVal bool) {
retVal, err := ts.Equal(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErf(del bool)(retVal *Tensor) {
retVal, err := ts.Erf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErf_()() {
err := ts.Erf_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustErfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ErfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfc(del bool)(retVal *Tensor) {
retVal, err := ts.Erfc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfc_()() {
err := ts.Erfc_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustErfcOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ErfcOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfinv(del bool)(retVal *Tensor) {
retVal, err := ts.Erfinv(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustErfinv_()() {
err := ts.Erfinv_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustErfinvOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ErfinvOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp(del bool)(retVal *Tensor) {
retVal, err := ts.Exp(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp2(del bool)(retVal *Tensor) {
retVal, err := ts.Exp2(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp2_()() {
err := ts.Exp2_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustExp2Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Exp2Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExp_()() {
err := ts.Exp_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustExpOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ExpOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpand(size []int64, implicit bool, del bool)(retVal *Tensor) {
retVal, err := ts.Expand(size, implicit, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpandAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ExpandAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpm1(del bool)(retVal *Tensor) {
retVal, err := ts.Expm1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExpm1_()() {
err := ts.Expm1_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustExpm1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Expm1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustExponential_(lambd float64)() {
err := ts.Exponential_(lambd)
if err != nil { log.Fatal(err) }
return
}
func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Eye(n, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := EyeM(n, m, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEyeMOut(out *Tensor, n int64, m int64)(retVal *Tensor) {
retVal, err := EyeMOut(out, n, m)
if err != nil { log.Fatal(err) }
return retVal
}
func MustEyeOut(out *Tensor, n int64)(retVal *Tensor) {
retVal, err := EyeOut(out, n)
if err != nil { log.Fatal(err) }
return retVal
}
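// exampleMustEyeUsage is a hand-written sketch, not part of the generated
// bindings: it contrasts a Must* in-place method with its error-returning
// form. gotch.Float and gotch.CPU are assumed DType/Device values from the
// gotch package.
func exampleMustEyeUsage() *Tensor {
	// 3x3 identity matrix on the CPU; MustEye fatals instead of returning an error.
	eye := MustEye(3, gotch.Float, gotch.CPU)
	// In-place exponential; the non-Must form would be:
	//   if err := eye.Exp_(); err != nil { log.Fatal(err) }
	eye.MustExp_()
	return eye
}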
func(ts *Tensor) MustFakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) {
retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FakeQuantizePerChannelAffineCachemask(scale, zeroPoint, axis, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustFakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := FakeQuantizePerChannelAffineCachemaskBackward(grad, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) {
retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FakeQuantizePerTensorAffineCachemask(scale, zeroPoint, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustFakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := FakeQuantizePerTensorAffineCachemaskBackward(grad, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool)(retVal *Tensor) {
retVal, err := ts.FakeQuantizePerTensorAffineTensorQparams(scale, zeroPoint, quantMin, quantMax, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) {
retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmPackGemmMatrixFp16(input *Tensor)(retVal *Tensor) {
retVal, err := FbgemmPackGemmMatrixFp16(input)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmPackQuantizedMatrix(input *Tensor)(retVal *Tensor) {
retVal, err := FbgemmPackQuantizedMatrix(input)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64)(retVal *Tensor) {
retVal, err := FbgemmPackQuantizedMatrixKn(input, k, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFeatureAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := FeatureAlphaDropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFeatureAlphaDropout_(p float64, train bool)() {
err := ts.FeatureAlphaDropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func MustFeatureDropout(input *Tensor, p float64, train bool)(retVal *Tensor) {
retVal, err := FeatureDropout(input, p, train)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFeatureDropout_(p float64, train bool)() {
err := ts.FeatureDropout_(p, train)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFftFft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := FftFftfreq(n, d, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFftFftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor) {
retVal, err := FftFftfreqOut(out, n, d)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftFftshift(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.FftFftshift(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftHfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftHfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIfftshift(dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.FftIfftshift(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIhfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftIrfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfft(n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfft2(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfft2(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfft2Out(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfftOut(out *Tensor, n []int64, dim int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfftOut(out, n, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := FftRfftfreq(n, d, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFftRfftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor) {
retVal, err := FftRfftfreqOut(out, n, d)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfftn(s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, del bool)(retVal *Tensor) {
retVal, err := ts.FftRfftnOut(out, s, dim, norm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFill_(value *Scalar)() {
err := ts.Fill_(value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFillDiagonal_(fillValue *Scalar, wrap bool)() {
err := ts.FillDiagonal_(fillValue, wrap)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFillTensor_(value *Tensor)() {
err := ts.FillTensor_(value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFix(del bool)(retVal *Tensor) {
retVal, err := ts.Fix(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFix_()() {
err := ts.Fix_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFixOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FixOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFlatten(startDim int64, endDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Flatten(startDim, endDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFlattenDenseTensors(tensors []Tensor)(retVal *Tensor) {
retVal, err := FlattenDenseTensors(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFlip(dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Flip(dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFliplr(del bool)(retVal *Tensor) {
retVal, err := ts.Fliplr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFlipud(del bool)(retVal *Tensor) {
retVal, err := ts.Flipud(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPower(exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPower(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPower_(exponent *Scalar)() {
err := ts.FloatPower_(exponent)
if err != nil { log.Fatal(err) }
return
}
func MustFloatPowerScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := FloatPowerScalar(selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := FloatPowerScalarOut(out, selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPowerTensor_(exponent *Tensor)() {
err := ts.FloatPowerTensor_(exponent)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloatPowerTensorScalar(exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPowerTensorScalar(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPowerTensorScalarOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloatPowerTensorTensorOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloor(del bool)(retVal *Tensor) {
retVal, err := ts.Floor(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloor_()() {
err := ts.Floor_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloorDivide(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloorDivide(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloorDivide_(other *Tensor)() {
err := ts.FloorDivide_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloorDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloorDivideOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloorDivideScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FloorDivideScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFloorDivideScalar_(other *Scalar)() {
err := ts.FloorDivideScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFloorOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FloorOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmax(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Fmax(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FmaxOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmin(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Fmin(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFminOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FminOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmod(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Fmod(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmod_(other *Scalar)() {
err := ts.Fmod_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFmodScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FmodScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmodTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FmodTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFmodTensor_(other *Tensor)() {
err := ts.FmodTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFmodTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FmodTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFrac(del bool)(retVal *Tensor) {
retVal, err := ts.Frac(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFrac_()() {
err := ts.Frac_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustFracOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FracOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool2d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool2d(kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool2dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool2dOutput(output, indices, kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFractionalMaxPool3d(kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool3d(kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.FractionalMaxPool3dBackwardGradInput(gradInput, gradOutput, kernelSize, outputSize, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kernelSize []int64, outputSize []int64, randomSamples *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FractionalMaxPool3dOutput(output, indices, kernelSize, outputSize, randomSamples, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFrexp(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Frexp(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.FrexpTensorOut(mantissa, exponent, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustFrobeniusNorm(del bool)(retVal *Tensor) {
retVal, err := ts.FrobeniusNorm(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFrobeniusNormDim(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.FrobeniusNormDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Full(size, fillValue, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFullLike(fillValue *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.FullLike(fillValue, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustFullOut(out *Tensor, size []int64, fillValue *Scalar)(retVal *Tensor) {
retVal, err := FullOut(out, size, fillValue)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustFusedMovingAvgObsFakeQuant(observerOn *Tensor, fakeQuantOn *Tensor, runningMin *Tensor, runningMax *Tensor, scale *Tensor, zeroPoint *Tensor, averagingConst float64, quantMin int64, quantMax int64, chAxis int64, perRowFakeQuant bool, symmetricQuant bool, del bool)(retVal *Tensor) {
retVal, err := ts.FusedMovingAvgObsFakeQuant(observerOn, fakeQuantOn, runningMin, runningMax, scale, zeroPoint, averagingConst, quantMin, quantMax, chAxis, perRowFakeQuant, symmetricQuant, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGather(dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts.Gather(dim, index, sparseGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts.GatherBackward(grad, dim, index, sparseGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) {
retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGcd(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Gcd(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGcd_(other *Tensor)() {
err := ts.Gcd_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGcdOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GcdOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGe(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Ge(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGe_(other *Scalar)() {
err := ts.Ge_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GeScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GeTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeTensor_(other *Tensor)() {
err := ts.GeTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GeTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGelu(del bool)(retVal *Tensor) {
retVal, err := ts.Gelu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeluBackward(grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GeluBackward(grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeluBackwardGradInput(gradInput *Tensor, grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GeluBackwardGradInput(gradInput, grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GeluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGeometric_(p float64)() {
err := ts.Geometric_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGeqrf(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Geqrf(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustGeqrfA(a *Tensor, tau *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.GeqrfA(a, tau, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustGer(vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Ger(vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGerOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GerOut(out, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGlu(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Glu(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGluBackward(gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.GluBackward(gradOutput, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.GluBackwardGradInput(gradInput, gradOutput, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGluOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.GluOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGrad(del bool)(retVal *Tensor) {
retVal, err := ts.Grad(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreater(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Greater(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreater_(other *Scalar)() {
err := ts.Greater_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterEqual(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqual(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterEqual_(other *Scalar)() {
err := ts.GreaterEqual_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqualScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterEqualTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqualTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterEqualTensor_(other *Tensor)() {
err := ts.GreaterEqualTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterEqualTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGreaterTensor_(other *Tensor)() {
err := ts.GreaterTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGreaterTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GreaterTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler2dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := GridSampler2dBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustGridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) {
retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGridSampler3dBackward(gradOutput *Tensor, input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := GridSampler3dBackward(gradOutput, input, grid, interpolationMode, paddingMode, alignCorners)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustGroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGru(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := Gru(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustGruData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := GruData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustGt(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Gt(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGt_(other *Scalar)() {
err := ts.Gt_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.GtScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGtTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GtTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustGtTensor_(other *Tensor)() {
err := ts.GtTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustGtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.GtTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindowPeriodicAlpha(windowLength, periodic, alpha, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HammingWindowPeriodicAlphaBeta(windowLength, periodic, alpha, beta, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HannWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := HannWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrink(del bool)(retVal *Tensor) {
retVal, err := ts.Hardshrink(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardshrinkBackward(gradOut, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardshrinkBackwardGradInput(gradInput, gradOut, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardshrinkOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardshrinkOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoid(del bool)(retVal *Tensor) {
retVal, err := ts.Hardsigmoid(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoid_()() {
err := ts.Hardsigmoid_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHardsigmoidBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardsigmoidBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardsigmoidBackwardGradInput(gradInput, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardsigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardsigmoidOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardswish(del bool)(retVal *Tensor) {
retVal, err := ts.Hardswish(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardswish_()() {
err := ts.Hardswish_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHardswishBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardswishBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardswishOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardswishOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanh(del bool)(retVal *Tensor) {
retVal, err := ts.Hardtanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanh_()() {
err := ts.Hardtanh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.HardtanhBackwardGradInput(gradInput, gradOutput, minVal, maxVal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHardtanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HardtanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHeaviside(values *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Heaviside(values, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHeaviside_(values *Tensor)() {
err := ts.Heaviside_(values)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHeavisideOut(out *Tensor, values *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HeavisideOut(out, values, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHistc(bins int64, del bool)(retVal *Tensor) {
retVal, err := ts.Histc(bins, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor) {
retVal, err := ts.HistcOut(out, bins, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) {
retVal, err := Hspmm(mat1, mat2)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) {
retVal, err := HspmmOut(out, mat1, mat2)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHstack(tensors []Tensor)(retVal *Tensor) {
retVal, err := Hstack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustHstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
retVal, err := HstackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLoss(target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLoss(target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLossBackward(gradOutput, target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLossBackwardOut(gradInput, gradOutput, target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHuberLossOut(out *Tensor, target *Tensor, reduction int64, delta float64, del bool)(retVal *Tensor) {
retVal, err := ts.HuberLossOut(out, target, reduction, delta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHypot(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Hypot(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustHypot_(other *Tensor)() {
err := ts.Hypot_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustHypotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.HypotOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustI0(del bool)(retVal *Tensor) {
retVal, err := ts.I0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustI0_()() {
err := ts.I0_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustI0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.I0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgamma(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Igamma(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgamma_(other *Tensor)() {
err := ts.Igamma_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIgammaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IgammaOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgammac(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Igammac(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIgammac_(other *Tensor)() {
err := ts.Igammac_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIgammacOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IgammacOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIm2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) {
retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIm2colBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) {
retVal, err := Im2colBackwardGradInput(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIm2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustImag(del bool)(retVal *Tensor) {
retVal, err := ts.Imag(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexAdd(dim, index, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexAdd_(dim int64, index *Tensor, source *Tensor)() {
err := ts.IndexAdd_(dim, index, source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexAddAlpha(dim int64, index *Tensor, source *Tensor, alpha *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.IndexAddAlpha(dim, index, source, alpha, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexAddAlpha_(dim int64, index *Tensor, source *Tensor, alpha *Scalar)() {
err := ts.IndexAddAlpha_(dim, index, source, alpha)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexCopy(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexCopy(dim, index, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexCopy_(dim int64, index *Tensor, source *Tensor)() {
err := ts.IndexCopy_(dim, index, source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexFill(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.IndexFill(dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexFill_(dim int64, index *Tensor, value *Scalar)() {
err := ts.IndexFill_(dim, index, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustIndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexFillIntTensor(dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexFillIntTensor_(dim int64, index *Tensor, value *Tensor)() {
err := ts.IndexFillIntTensor_(dim, index, value)
if err != nil { log.Fatal(err) }
return
}
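// MustIndexSelect gathers slices of ts along dimension dim at the positions
// given by index (a 1-D integer tensor), exiting via log.Fatal on error.
// Illustrative use, assuming x is an existing *Tensor and idx holds valid
// row indices for it:
//
//	rows := x.MustIndexSelect(0, idx, false)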
func(ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexSelect(dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor)(retVal *Tensor) {
retVal, err := IndexSelectBackward(grad, selfSizes, dim, index)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IndexSelectOut(out, dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIndices(del bool)(retVal *Tensor) {
retVal, err := ts.Indices(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInfinitelyDifferentiableGeluBackward(grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.InfinitelyDifferentiableGeluBackward(grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInner(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Inner(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInnerOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.InnerOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustInstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) {
retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIntRepr(del bool)(retVal *Tensor) {
retVal, err := ts.IntRepr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInverse(del bool)(retVal *Tensor) {
retVal, err := ts.Inverse(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustInverseOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.InverseOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsCoalesced(del bool)(retVal bool) {
retVal, err := ts.IsCoalesced(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsComplex(del bool)(retVal bool) {
retVal, err := ts.IsComplex(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsConj(del bool)(retVal bool) {
retVal, err := ts.IsConj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsDistributed(del bool)(retVal bool) {
retVal, err := ts.IsDistributed(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsFloatingPoint(del bool)(retVal bool) {
retVal, err := ts.IsFloatingPoint(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsInference(del bool)(retVal bool) {
retVal, err := ts.IsInference(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsLeaf(del bool)(retVal bool) {
retVal, err := ts.IsLeaf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsNeg(del bool)(retVal bool) {
retVal, err := ts.IsNeg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsNonzero(del bool)(retVal bool) {
retVal, err := ts.IsNonzero(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsPinned(device gotch.Device, del bool)(retVal bool) {
retVal, err := ts.IsPinned(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsSameSize(other *Tensor, del bool)(retVal bool) {
retVal, err := ts.IsSameSize(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsSetTo(tensor *Tensor, del bool)(retVal bool) {
retVal, err := ts.IsSetTo(tensor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsSigned(del bool)(retVal bool) {
retVal, err := ts.IsSigned(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsVulkanAvailable()(retVal bool) {
retVal, err := IsVulkanAvailable()
if err != nil { log.Fatal(err) }
return retVal
}
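// MustIsclose reports elementwise closeness of ts and other as a boolean
// tensor, using the usual |ts - other| <= atol + rtol*|other| test;
// equalNan treats NaN values in matching positions as equal.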
func(ts *Tensor) MustIsclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal *Tensor) {
retVal, err := ts.Isclose(other, rtol, atol, equalNan, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsfinite(del bool)(retVal *Tensor) {
retVal, err := ts.Isfinite(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsin(elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := Isin(elements, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinScalarTensor(element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinScalarTensor(element, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinScalarTensorOut(out *Tensor, element *Scalar, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinScalarTensorOut(out, element, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinTensorScalar(elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinTensorScalar(elements, testElement, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinTensorScalarOut(out *Tensor, elements *Tensor, testElement *Scalar, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinTensorScalarOut(out, elements, testElement, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func MustIsinTensorTensorOut(out *Tensor, elements *Tensor, testElements *Tensor, assumeUnique bool, invert bool)(retVal *Tensor) {
retVal, err := IsinTensorTensorOut(out, elements, testElements, assumeUnique, invert)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsinf(del bool)(retVal *Tensor) {
retVal, err := ts.Isinf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsnan(del bool)(retVal *Tensor) {
retVal, err := ts.Isnan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsneginf(del bool)(retVal *Tensor) {
retVal, err := ts.Isneginf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsneginfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IsneginfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsposinf(del bool)(retVal *Tensor) {
retVal, err := ts.Isposinf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsposinfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.IsposinfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIsreal(del bool)(retVal *Tensor) {
retVal, err := ts.Isreal(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustIstft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool)(retVal *Tensor) {
retVal, err := ts.Istft(nFft, hopLength, winLength, window, center, normalized, onesided, length, returnComplex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := KaiserWindow(windowLength, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := KaiserWindowBeta(windowLength, periodic, beta, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustKaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := KaiserWindowPeriodic(windowLength, periodic, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustKlDiv(target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor) {
retVal, err := ts.KlDiv(target, reduction, logTarget, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustKlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor) {
retVal, err := ts.KlDivBackward(gradOutput, target, reduction, logTarget, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustKron(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Kron(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustKronOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.KronOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
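// MustKthvalue returns the k-th smallest value of ts along dim together
// with the tensor of corresponding indices; keepdim preserves the reduced
// dimension with size 1.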
func(ts *Tensor) MustKthvalue(k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Kthvalue(k, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustKthvalueValues(values *Tensor, indices *Tensor, k int64, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.KthvalueValues(values, indices, k, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustL1Loss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.L1Loss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.L1LossBackwardGradInput(gradInput, gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.L1LossOut(out, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool)(retVal *Tensor) {
retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLcm(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Lcm(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLcm_(other *Tensor)() {
err := ts.Lcm_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLcmOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LcmOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLdexp(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Ldexp(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLdexp_(other *Tensor)() {
err := ts.Ldexp_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLdexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LdexpOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLe(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Le(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLe_(other *Scalar)() {
err := ts.Le_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LeScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LeTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeTensor_(other *Tensor)() {
err := ts.LeTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LeTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyRelu(del bool)(retVal *Tensor) {
retVal, err := ts.LeakyRelu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyRelu_()() {
err := ts.LeakyRelu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor) {
retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor) {
retVal, err := ts.LeakyReluBackwardGradInput(gradInput, gradOutput, negativeSlope, selfIsResult, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLeakyReluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LeakyReluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerp(end *Tensor, weight *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Lerp(end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerp_(end *Tensor, weight *Scalar)() {
err := ts.Lerp_(end, weight)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LerpScalarOut(out, end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerpTensor(end *Tensor, weight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LerpTensor(end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLerpTensor_(end *Tensor, weight *Tensor)() {
err := ts.LerpTensor_(end, weight)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LerpTensorOut(out, end, weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLess(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Less(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLess_(other *Scalar)() {
err := ts.Less_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessEqual(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqual(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessEqual_(other *Scalar)() {
err := ts.LessEqual_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqualScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessEqualTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqualTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessEqualTensor_(other *Tensor)() {
err := ts.LessEqualTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessEqualTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LessScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLessTensor_(other *Tensor)() {
err := ts.LessTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLessTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LessTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLgamma(del bool)(retVal *Tensor) {
retVal, err := ts.Lgamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLgamma_()() {
err := ts.Lgamma_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLgammaOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LgammaOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCholesky(upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCholesky(upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCholeskyEx(upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgCholeskyEx(upper, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgCholeskyExL(l *Tensor, info *Tensor, upper bool, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgCholeskyExL(l, info, upper, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCholeskyOut(out, upper, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCond(p *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCond(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCondOut(out *Tensor, p *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCondOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCondPStr(p string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCondPStr(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgCondPStrOut(out *Tensor, p string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgCondPStrOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgDet(del bool)(retVal *Tensor) {
retVal, err := ts.LinalgDet(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgDetOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgDetOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEig(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEig(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEigOut(eigenvalues, eigenvectors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEigh(uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEigh(uPLO, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgEighEigvals(eigvals, eigvecs, uPLO, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgEigvals(del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvals(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEigvalsOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvalsOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEigvalsh(uPLO string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvalsh(uPLO, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgEigvalshOut(out *Tensor, uPLO string, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgEigvalshOut(out, uPLO, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgHouseholderProduct(input *Tensor, tau *Tensor)(retVal *Tensor) {
retVal, err := LinalgHouseholderProduct(input, tau)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor)(retVal *Tensor) {
retVal, err := LinalgHouseholderProductOut(out, input, tau)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgInv(del bool)(retVal *Tensor) {
retVal, err := ts.LinalgInv(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgInvEx(checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgInvEx(checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgInvExInverse(inverse *Tensor, info *Tensor, checkErrors bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgInvExInverse(inverse, info, checkErrors, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgInvOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgInvOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgLstsq(b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsq(b, rcond, driver, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func(ts *Tensor) MustLinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tensor, singularValues *Tensor, b *Tensor, rcond []float64, driver string, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor) {
retVal0, retVal1, retVal2, retVal3, err := ts.LinalgLstsqOut(solution, residuals, rank, singularValues, b, rcond, driver, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3
}
func(ts *Tensor) MustLinalgMatmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatmulOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixPower(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixPower(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixPowerOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixRank(tol []float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixRank(tol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgMatrixRankOut(out *Tensor, tol []float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgMatrixRankOut(out, tol, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgMatrixRankOutTolTensor(out, input, tol, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool)(retVal *Tensor) {
retVal, err := LinalgMatrixRankTolTensor(input, tol, hermitian)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMultiDot(tensors []Tensor)(retVal *Tensor) {
retVal, err := LinalgMultiDot(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgMultiDotOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
retVal, err := LinalgMultiDotOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNorm(ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNormOrdStr(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNormOrdStr(ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNormOrdStrOut(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNormOrdStrOut(out, ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgNormOut(out, ord, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinv(rcond float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinv(rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvOut(out, rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvOutRcondTensor(out, rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgPinvRcondTensor(rcond, hermitian, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgQr(mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgQr(mode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgQrOut(q *Tensor, r *Tensor, mode string, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgQrOut(q, r, mode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgSlogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgSlogdet(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LinalgSlogdetOut(sign, logabsdet, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLinalgSolve(input *Tensor, other *Tensor)(retVal *Tensor) {
retVal, err := LinalgSolve(input, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgSolveOut(out *Tensor, input *Tensor, other *Tensor)(retVal *Tensor) {
retVal, err := LinalgSolveOut(out, input, other)
if err != nil { log.Fatal(err) }
return retVal
}
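// MustLinalgSvd computes a singular value decomposition of ts and returns
// the factors (U, S, Vh); fullMatrices selects full-size versus reduced
// U and Vh.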
func(ts *Tensor) MustLinalgSvd(fullMatrices bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.LinalgSvd(fullMatrices, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustLinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, fullMatrices bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.LinalgSvdU(u, s, vh, fullMatrices, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLinalgSvdvals(input *Tensor)(retVal *Tensor) {
retVal, err := LinalgSvdvals(input)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinalgSvdvalsOut(out *Tensor, input *Tensor)(retVal *Tensor) {
retVal, err := LinalgSvdvalsOut(out, input)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorinv(ind int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorinv(ind, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorinvOut(out *Tensor, ind int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorinvOut(out, ind, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorsolve(other *Tensor, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorsolve(other, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.LinalgTensorsolveOut(out, other, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := Linear(input, weight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) {
retVal, err := LinearOut(out, input, weight, bias)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinspace(start *Scalar, end *Scalar, steps []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64)(retVal *Tensor) {
retVal, err := LinspaceOut(out, start, end, steps)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog(del bool)(retVal *Tensor) {
retVal, err := ts.Log(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog10(del bool)(retVal *Tensor) {
retVal, err := ts.Log10(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog10_()() {
err := ts.Log10_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLog10Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Log10Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog1p(del bool)(retVal *Tensor) {
retVal, err := ts.Log1p(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog1p_()() {
err := ts.Log1p_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLog1pOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Log1pOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog2(del bool)(retVal *Tensor) {
retVal, err := ts.Log2(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog2_()() {
err := ts.Log2_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLog2Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Log2Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLog_()() {
err := ts.Log_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogNormal_(mean float64, std float64)() {
err := ts.LogNormal_(mean, std)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoid(del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoid(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoidBackwardGradInput(gradInput, gradOutput, buffer, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogSigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogSigmoidOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
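// MustLogSoftmax applies log-softmax along dimension dim, optionally
// casting to dtype first. Illustrative use, assuming logits is an existing
// floating-point *Tensor:
//
//	logProbs := logits.MustLogSoftmax(-1, gotch.Float, false)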
func(ts *Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.LogSoftmax(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexp(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Logaddexp(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexp2(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Logaddexp2(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexp2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Logaddexp2Out(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogaddexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogaddexpOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogcumsumexp(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Logcumsumexp(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.LogcumsumexpOut(out, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogdet(del bool)(retVal *Tensor) {
retVal, err := ts.Logdet(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalAnd(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalAnd(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalAnd_(other *Tensor)() {
err := ts.LogicalAnd_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalAndOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalAndOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalNot(del bool)(retVal *Tensor) {
retVal, err := ts.LogicalNot(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalNot_()() {
err := ts.LogicalNot_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalNotOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalNotOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalOr(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalOr(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalOr_(other *Tensor)() {
err := ts.LogicalOr_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalOrOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalXor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalXor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogicalXor_(other *Tensor)() {
err := ts.LogicalXor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogicalXorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LogicalXorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogit(eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.Logit(eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogit_(eps []float64)() {
err := ts.Logit_(eps)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLogitBackward(gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogitBackward(gradOutput, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogitBackwardGradInput(gradInput, gradOutput, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.LogitOut(out, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLogspace(start *Scalar, end *Scalar, steps []int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64, base float64)(retVal *Tensor) {
retVal, err := LogspaceOut(out, start, end, steps, base)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Logsumexp(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.LogsumexpOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
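// MustLstm runs a multi-layer LSTM over input and returns the output
// sequence together with the final hidden and cell states; hx holds the
// initial (h0, c0) pair and params the flattened per-layer weights.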
func MustLstm(input *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := Lstm(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := LstmCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustLstmData(data *Tensor, batchSizes *Tensor, hx []Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LstmData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustLstsq(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Lstsq(a, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLstsqX(x *Tensor, qr *Tensor, a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.LstsqX(x, qr, a, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustLt(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Lt(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLt_(other *Scalar)() {
err := ts.Lt_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.LtScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLtTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LtTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLtTensor_(other *Tensor)() {
err := ts.LtTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustLtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LtTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLuSolve(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LuSolve(lUData, lUPivots, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustLuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustLuUnpack(lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LuUnpack(lUData, lUPivots, unpackData, unpackPivots)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustLuUnpackOut(p *Tensor, l *Tensor, u *Tensor, lUData *Tensor, lUPivots *Tensor, unpackData bool, unpackPivots bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := LuUnpackOut(p, l, u, lUData, lUPivots, unpackData, unpackPivots)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustMarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) {
retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedFill(mask *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedFill(mask, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedFill_(mask *Tensor, value *Scalar)() {
err := ts.MaskedFill_(mask, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMaskedFillTensor(mask *Tensor, value *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedFillTensor(mask, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedFillTensor_(mask *Tensor, value *Tensor)() {
err := ts.MaskedFillTensor_(mask, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMaskedScatter(mask *Tensor, source *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedScatter(mask, source, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedScatter_(mask *Tensor, source *Tensor)() {
err := ts.MaskedScatter_(mask, source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMaskedSelect(mask *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedSelect(mask, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor)(retVal *Tensor) {
retVal, err := MaskedSelectBackward(grad, input, mask)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaskedSelectOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaskedSelectOut(out, mask, del)
if err != nil { log.Fatal(err) }
return retVal
}
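// MustMatmul is the broadcasting matrix product of ts and other, exiting
// via log.Fatal on error. Illustrative use, assuming a and b are existing
// tensors of compatible shape:
//
//	c := a.MustMatmul(b, false)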
func(ts *Tensor) MustMatmul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Matmul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MatmulOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixExp(del bool)(retVal *Tensor) {
retVal, err := ts.MatrixExp(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixExpBackward(grad *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixExpBackward(grad, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixPower(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixPower(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixPowerOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixRank(symmetric bool, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixRank(symmetric, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMatrixRankTol(tol float64, symmetric bool, del bool)(retVal *Tensor) {
retVal, err := ts.MatrixRankTol(tol, symmetric, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMax(del bool)(retVal *Tensor) {
retVal, err := ts.Max(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxDimMax(max, maxValues, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxOther(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxOther(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool1dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool1dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
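// MustMaxPool2d applies 2-D max pooling over ts (expected in NCHW layout)
// with the given kernelSize, stride, padding, dilation and ceilMode
// settings, returning the pooled tensor.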
func(ts *Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool2dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool2dWithIndicesBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool2dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool2dWithIndicesOut(out, indices, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool3dWithIndices(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool3dWithIndices(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool3dWithIndicesBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaxPool3dWithIndicesBackwardGradInput(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxPool3dWithIndicesOut(out *Tensor, indices *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MaxPool3dWithIndicesOut(out, indices, kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMaxUnpool2d(indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool2d(indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool2dBackward(gradOutput, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool2dBackwardGradInput(gradInput, gradOutput, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool3d(indices, outputSize, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool3dBackward(gradOutput, indices, outputSize, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool3dBackwardGradInput(gradInput, gradOutput, indices, outputSize, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaximum(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Maximum(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMaximumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MaximumOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMean(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Mean(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMeanDim(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.MeanDim(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMedian(del bool)(retVal *Tensor) {
retVal, err := ts.Median(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MedianDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MedianDimValues(values, indices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMin(del bool)(retVal *Tensor) {
retVal, err := ts.Min(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MinDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MinDimMin(min, minIndices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMinOther(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MinOther(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MinOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinimum(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Minimum(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMinimumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MinimumOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, exponentialAverageFactor float64, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := MiopenBatchNorm(input, weight, bias, runningMean, runningVar, training, exponentialAverageFactor, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustMiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, runningMean *Tensor, runningVar *Tensor, saveMean *Tensor, saveVar *Tensor, epsilon float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := MiopenBatchNormBackward(input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, epsilon)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustMiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenConvolutionBackwardBias(gradOutput *Tensor)(retVal *Tensor) {
retVal, err := MiopenConvolutionBackwardBias(gradOutput)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal *Tensor) {
retVal, err := MiopenConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal *Tensor) {
retVal, err := MiopenConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal *Tensor) {
retVal, err := MiopenDepthwiseConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) {
retVal, err := ts.MiopenDepthwiseConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMiopenRnn(input *Tensor, weight []Tensor, weightStride0 int64, hx *Tensor, cx *Tensor, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, dropout float64, train bool, bidirectional bool, batchSizes []int64, dropoutState *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, retVal3 *Tensor, retVal4 *Tensor) {
retVal0, retVal1, retVal2, retVal3, retVal4, err := MiopenRnn(input, weight, weightStride0, hx, cx, mode, hiddenSize, numLayers, batchFirst, dropout, train, bidirectional, batchSizes, dropoutState)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2, retVal3, retVal4
}
func(ts *Tensor) MustMish(del bool)(retVal *Tensor) {
retVal, err := ts.Mish(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMish_()() {
err := ts.Mish_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMishBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MishBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMishOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MishOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnAdaptiveAvgPool2dBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool)(retVal *Tensor) {
retVal, err := MkldnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, biasDefined)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnConvolutionBackwardWeights(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.MkldnnConvolutionBackwardWeights(weightSize, gradOutput, padding, stride, dilation, groups, biasDefined, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMkldnnLinear(weight *Tensor, bias *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnLinear(weight, bias, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Tensor)(retVal *Tensor) {
retVal, err := MkldnnLinearBackwardInput(inputSize, gradOutput, weight)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tensor, biasDefined bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := MkldnnLinearBackwardWeights(gradOutput, input, weight, biasDefined)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnMaxPool2dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) {
retVal, err := MkldnnMaxPool2dBackward(gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnMaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustMkldnnMaxPool3dBackward(gradOutput *Tensor, output *Tensor, input *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool)(retVal *Tensor) {
retVal, err := MkldnnMaxPool3dBackward(gradOutput, output, input, kernelSize, stride, padding, dilation, ceilMode)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) {
retVal, err := ts.MkldnnReorderConv3dWeight(padding, stride, dilation, groups, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMm(mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Mm(mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
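// A minimal, hand-written usage sketch for MustMm above (not generated), assuming
// gotch.Float and gotch.CPU are valid gotch.DType/gotch.Device values and that the
// trailing del flag frees the receiver when true:
//
//	a := MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
//	b := MustOnes([]int64{3, 4}, gotch.Float, gotch.CPU)
//	c := a.MustMm(b, true) // 2x4 result, every entry 3; `a` is freed because del=true
//	c.MustDrop()
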
func(ts *Tensor) MustMmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MmOut(out, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMode(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Mode(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustModeValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.ModeValues(values, indices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustMoveaxis(source []int64, destination []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Moveaxis(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMoveaxisInt(source int64, destination int64, del bool)(retVal *Tensor) {
retVal, err := ts.MoveaxisInt(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMovedim(source []int64, destination []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Movedim(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMovedimInt(source int64, destination int64, del bool)(retVal *Tensor) {
retVal, err := ts.MovedimInt(source, destination, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLoss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLossBackwardGradInput(gradInput, gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMseLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MseLossOut(out, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMsort(del bool)(retVal *Tensor) {
retVal, err := ts.Msort(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMsortOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MsortOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMul(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Mul(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMul_(other *Tensor)() {
err := ts.Mul_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MulOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMulScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MulScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMulScalar_(other *Scalar)() {
err := ts.MulScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultiMarginLossBackwardGradInput(gradInput, gradOutput, target, p, margin, weight, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLoss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLossBackwardGradInput(gradInput, gradOutput, target, reduction, isTarget, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultinomial(numSamples int64, replacement bool, del bool)(retVal *Tensor) {
retVal, err := ts.Multinomial(numSamples, replacement, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool)(retVal *Tensor) {
retVal, err := ts.MultinomialOut(out, numSamples, replacement, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiply(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Multiply(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiply_(other *Tensor)() {
err := ts.Multiply_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMultiplyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MultiplyOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiplyScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.MultiplyScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMultiplyScalar_(other *Scalar)() {
err := ts.MultiplyScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMv(vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Mv(vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMvOut(out *Tensor, vec *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.MvOut(out, vec, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMvlgamma(p int64, del bool)(retVal *Tensor) {
retVal, err := ts.Mvlgamma(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustMvlgamma_(p int64)() {
err := ts.Mvlgamma_(p)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustMvlgammaOut(out *Tensor, p int64, del bool)(retVal *Tensor) {
retVal, err := ts.MvlgammaOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanToNum(nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor) {
retVal, err := ts.NanToNum(nan, posinf, neginf, del)
if err != nil { log.Fatal(err) }
return retVal
}
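// A minimal, hand-written sketch for MustNanToNum above (not generated), given an
// existing *Tensor x. The []float64 parameters stand in for optional scalars; a nil
// slice is assumed to select the library default for nan/posinf/neginf:
//
//	cleaned := x.MustNanToNum(nil, nil, nil, false) // defaults for all three
//	clamped := x.MustNanToNum([]float64{0}, []float64{1e6}, []float64{-1e6}, false)
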
func(ts *Tensor) MustNanToNum_(nan []float64, posinf []float64, neginf []float64)() {
err := ts.NanToNum_(nan, posinf, neginf)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNanToNumOut(out *Tensor, nan []float64, posinf []float64, neginf []float64, del bool)(retVal *Tensor) {
retVal, err := ts.NanToNumOut(out, nan, posinf, neginf, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmean(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Nanmean(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NanmeanOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmedian(del bool)(retVal *Tensor) {
retVal, err := ts.Nanmedian(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanmedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.NanmedianDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustNanmedianDimValues(values *Tensor, indices *Tensor, dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.NanmedianDimValues(values, indices, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustNanquantile(q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Nanquantile(q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileNew(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileNew(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileNewOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileNewOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileNewScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileNewScalar(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileNewScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileNewScalarOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileOut(out, q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileScalar(q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileScalar(q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNanquantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NanquantileScalarOut(out, q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNansum(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Nansum(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNansumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NansumDimIntlist(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNansumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NansumIntlistOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrow(dim int64, start int64, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.Narrow(dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.NarrowCopy(dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrowCopyOut(out *Tensor, dim int64, start int64, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.NarrowCopyOut(out, dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNarrowTensor(dim int64, start *Tensor, length int64, del bool)(retVal *Tensor) {
retVal, err := ts.NarrowTensor(dim, start, length, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeBatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustNativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeBatchNormOut(out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, training, momentum, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustNativeGroupNorm(input *Tensor, weight *Tensor, bias *Tensor, n int64, c int64, hxW int64, group int64, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeGroupNorm(input, weight, bias, n, c, hxW, group, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func MustNativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := NativeLayerNorm(input, normalizedShape, weight, bias, eps)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustNativeNorm(del bool)(retVal *Tensor) {
retVal, err := ts.NativeNorm(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNativeNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NativeNormScalaroptDimDtype(p, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNe(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Ne(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNe_(other *Scalar)() {
err := ts.Ne_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.NeScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NeTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeTensor_(other *Tensor)() {
err := ts.NeTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NeTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeg(del bool)(retVal *Tensor) {
retVal, err := ts.Neg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNeg_()() {
err := ts.Neg_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNegOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NegOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNegative(del bool)(retVal *Tensor) {
retVal, err := ts.Negative(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNegative_()() {
err := ts.Negative_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNegativeOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NegativeOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewEmptyStrided(size, stride, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewOnes(size, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNextafter(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Nextafter(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNextafter_(other *Tensor)() {
err := ts.Nextafter_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNextafterOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NextafterOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2dBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossBackwardGradInput(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossNd(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossNd(target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) {
retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNonzero(del bool)(retVal *Tensor) {
retVal, err := ts.Nonzero(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNonzeroOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NonzeroOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNorm(del bool)(retVal *Tensor) {
retVal, err := ts.Norm(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormDtypeOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NormDtypeOut(out, p, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNormExceptDim(v *Tensor, pow int64, dim int64)(retVal *Tensor) {
retVal, err := NormExceptDim(v, pow, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NormOut(out, p, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalaroptDim(p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalaroptDim(p, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalaroptDimDtype(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalaroptDimDtype(p, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.NormScalaroptDtype(p, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNormal(out *Tensor, mean *Tensor, std float64)(retVal *Tensor) {
retVal, err := Normal(out, mean, std)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNormal_(mean float64, std float64)() {
err := ts.Normal_(mean, std)
if err != nil { log.Fatal(err) }
return
}
func MustNormalFloatFloatOut(out *Tensor, mean float64, std float64, size []int64)(retVal *Tensor) {
retVal, err := NormalFloatFloatOut(out, mean, std, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNormalFloatTensorOut(out *Tensor, mean float64, std *Tensor)(retVal *Tensor) {
retVal, err := NormalFloatTensorOut(out, mean, std)
if err != nil { log.Fatal(err) }
return retVal
}
func MustNormalTensorTensorOut(out *Tensor, mean *Tensor, std *Tensor)(retVal *Tensor) {
retVal, err := NormalTensorTensorOut(out, mean, std)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqual(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqual(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqual_(other *Scalar)() {
err := ts.NotEqual_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNotEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqualScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqualTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqualTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNotEqualTensor_(other *Tensor)() {
err := ts.NotEqualTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustNotEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.NotEqualTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNorm(keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNorm(keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNormDim(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNormDim(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNormDimOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNormDimOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNuclearNormOut(out *Tensor, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.NuclearNormOut(out, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustNumpyT(del bool)(retVal *Tensor) {
retVal, err := ts.NumpyT(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOneHot(numClasses int64, del bool)(retVal *Tensor) {
retVal, err := ts.OneHot(numClasses, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Ones(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOnesLike(del bool)(retVal *Tensor) {
retVal, err := ts.OnesLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
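// A minimal, hand-written sketch contrasting the factory MustOnes with the instance
// method MustOnesLike above (not generated); gotch.Float and gotch.CPU are assumed
// exported gotch values:
//
//	base := MustOnes([]int64{2, 2}, gotch.Float, gotch.CPU)
//	same := base.MustOnesLike(false) // ones with base's shape and dtype; base kept (del=false)
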
func MustOnesOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := OnesOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrgqr(input2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Orgqr(input2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrgqrOut(out *Tensor, input2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.OrgqrOut(out, input2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrmqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) {
retVal, err := ts.Ormqr(input2, input3, left, transpose, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) {
retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOuter(vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Outer(vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOuterOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.OuterOut(out, vec2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustOutputNr(del bool)(retVal int64) {
retVal, err := ts.OutputNr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPadSequence(sequences []Tensor, batchFirst bool, paddingValue float64)(retVal *Tensor) {
retVal, err := PadSequence(sequences, batchFirst, paddingValue)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool)(retVal *Tensor) {
retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPdist(p float64, del bool)(retVal *Tensor) {
retVal, err := ts.Pdist(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPermute(dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Permute(dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPinMemory(device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.PinMemory(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPinverse(rcond float64, del bool)(retVal *Tensor) {
retVal, err := ts.Pinverse(rcond, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPixelShuffle(upscaleFactor int64, del bool)(retVal *Tensor) {
retVal, err := ts.PixelShuffle(upscaleFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPixelUnshuffle(downscaleFactor int64, del bool)(retVal *Tensor) {
retVal, err := ts.PixelUnshuffle(downscaleFactor, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPoisson(del bool)(retVal *Tensor) {
retVal, err := ts.Poisson(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal *Tensor) {
retVal, err := PoissonNllLoss(input, target, logInput, full, eps, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPolar(abs *Tensor, angle *Tensor)(retVal *Tensor) {
retVal, err := Polar(abs, angle)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPolarOut(out *Tensor, abs *Tensor, angle *Tensor)(retVal *Tensor) {
retVal, err := PolarOut(out, abs, angle)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPolygamma(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.Polygamma(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPolygamma_(n int64)() {
err := ts.Polygamma_(n)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.PolygammaOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPositive(del bool)(retVal *Tensor) {
retVal, err := ts.Positive(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPow(exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Pow(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPow_(exponent *Scalar)() {
err := ts.Pow_(exponent)
if err != nil { log.Fatal(err) }
return
}
func MustPowScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := PowScalar(selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func MustPowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) {
retVal, err := PowScalarOut(out, selfScalar, exponent)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPowTensor_(exponent *Tensor)() {
err := ts.PowTensor_(exponent)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustPowTensorScalar(exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.PowTensorScalar(exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPowTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.PowTensorScalarOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPowTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.PowTensorTensorOut(out, exponent, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPrelu(weight *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Prelu(weight, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPreluBackward(gradOutput *Tensor, weight *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.PreluBackward(gradOutput, weight, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustProd(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Prod(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ProdDimInt(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ProdIntOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPut(index *Tensor, source *Tensor, accumulate bool, del bool)(retVal *Tensor) {
retVal, err := ts.Put(index, source, accumulate, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool)() {
err := ts.Put_(index, source, accumulate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustQPerChannelAxis(del bool)(retVal int64) {
retVal, err := ts.QPerChannelAxis(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQPerChannelScales(del bool)(retVal *Tensor) {
retVal, err := ts.QPerChannelScales(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQPerChannelZeroPoints(del bool)(retVal *Tensor) {
retVal, err := ts.QPerChannelZeroPoints(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQScale(del bool)(retVal float64) {
retVal, err := ts.QScale(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQZeroPoint(del bool)(retVal int64) {
retVal, err := ts.QZeroPoint(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQr(some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Qr(some, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustQrQ(q *Tensor, r *Tensor, some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.QrQ(q, r, some, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustQuantile(q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.Quantile(q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileNew(q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileNew(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileNewOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileNewOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileNewScalar(q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileNewScalar(q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileNewScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, interpolation string, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileNewScalarOut(out, q, dim, keepdim, interpolation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileOut(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileOut(out, q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileScalar(q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileScalar(q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantileScalarOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantileScalarOut(out, q, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizePerTensorTensorQparams(scale, zeroPoint, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor) {
retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedLstmCell(input *Tensor, hx []Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := QuantizedLstmCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustQuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) {
retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustQuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) {
retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRad2deg(del bool)(retVal *Tensor) {
retVal, err := ts.Rad2deg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRad2deg_()() {
err := ts.Rad2deg_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRad2degOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Rad2degOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Rand(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandLike(del bool)(retVal *Tensor) {
retVal, err := ts.RandLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := RandOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Randint(high, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandintLike(high int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandintLike(high, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandintLikeLowDtype(low int64, high int64, del bool)(retVal *Tensor) {
retVal, err := ts.RandintLikeLowDtype(low, high, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := RandintLow(low, high, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandintLowOut(out *Tensor, low int64, high int64, size []int64)(retVal *Tensor) {
retVal, err := RandintLowOut(out, low, high, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandintOut(out *Tensor, high int64, size []int64)(retVal *Tensor) {
retVal, err := RandintOut(out, high, size)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Randn(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
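// A minimal, hand-written sketch of the random factories above (not generated),
// assuming gotch.Float, gotch.Int64, and gotch.CPU are exported by the gotch package:
//
//	n := MustRandn([]int64{3, 3}, gotch.Float, gotch.CPU)           // standard-normal samples
//	u := MustRand([]int64{3, 3}, gotch.Float, gotch.CPU)            // uniform samples in [0, 1)
//	i := MustRandintLow(0, 10, []int64{16}, gotch.Int64, gotch.CPU) // integers in [0, 10)
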
func(ts *Tensor) MustRandnLike(del bool)(retVal *Tensor) {
retVal, err := ts.RandnLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandnOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := RandnOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRandom_()() {
err := ts.Random_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRandomFrom_(from int64, to []int64)() {
err := ts.RandomFrom_(from, to)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRandomTo_(to int64)() {
err := ts.RandomTo_(to)
if err != nil { log.Fatal(err) }
return
}
func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Randperm(n, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRandpermOut(out *Tensor, n int64)(retVal *Tensor) {
retVal, err := RandpermOut(out, n)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRange(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Range(start, end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRangeOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) {
retVal, err := RangeOut(out, start, end)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := RangeStep(start, end, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRavel(del bool)(retVal *Tensor) {
retVal, err := ts.Ravel(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReal(del bool)(retVal *Tensor) {
retVal, err := ts.Real(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReciprocal(del bool)(retVal *Tensor) {
retVal, err := ts.Reciprocal(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReciprocal_()() {
err := ts.Reciprocal_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustReciprocalOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ReciprocalOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad1dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad2dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReflectionPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReflectionPad3dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRelu(del bool)(retVal *Tensor) {
retVal, err := ts.Relu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRelu6(del bool)(retVal *Tensor) {
retVal, err := ts.Relu6(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRelu6_()() {
err := ts.Relu6_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRelu_()() {
err := ts.Relu_()
if err != nil { log.Fatal(err) }
return
}
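// A minimal, hand-written sketch of the out-of-place vs. in-place ReLU wrappers
// above (not generated), assuming gotch.Float and gotch.CPU as in the other sketches:
//
//	x := MustRandn([]int64{4}, gotch.Float, gotch.CPU)
//	y := x.MustRelu(false) // new tensor; x is kept because del=false
//	x.MustRelu_()          // in-place variant: mutates x and returns nothing
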
func(ts *Tensor) MustRemainder(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Remainder(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRemainder_(other *Scalar)() {
err := ts.Remainder_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRemainderScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.RemainderScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRemainderScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := RemainderScalarTensor(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRemainderTensor(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RemainderTensor(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRemainderTensor_(other *Tensor)() {
err := ts.RemainderTensor_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRemainderTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RemainderTensorOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRenorm(p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Renorm(p, dim, maxnorm, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRenorm_(p *Scalar, dim int64, maxnorm *Scalar)() {
err := ts.Renorm_(p, dim, maxnorm)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.RenormOut(out, p, dim, maxnorm, del)
if err != nil { log.Fatal(err) }
return retVal
}
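// MustRepeat is the log.Fatal-on-error wrapper around Repeat. Illustrative
// usage (a sketch; x is assumed to be an existing 2-D *Tensor, and repeats
// gives the number of copies along each dimension):
//
//	tiled := x.MustRepeat([]int64{2, 3}, false)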
func(ts *Tensor) MustRepeat(repeats []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Repeat(repeats, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRepeatInterleave(repeats *Tensor, outputSize []int64)(retVal *Tensor) {
retVal, err := RepeatInterleave(repeats, outputSize)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRepeatInterleaveSelfInt(repeats int64, dim []int64, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RepeatInterleaveSelfInt(repeats, dim, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRepeatInterleaveSelfTensor(repeats *Tensor, dim []int64, outputSize []int64, del bool)(retVal *Tensor) {
retVal, err := ts.RepeatInterleaveSelfTensor(repeats, dim, outputSize, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad1dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad2dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3d(padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3d(padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3dBackwardGradInput(gradInput, gradOutput, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReplicationPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.ReplicationPad3dOut(out, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRequiresGrad_(requiresGrad bool)() {
err := ts.RequiresGrad_(requiresGrad)
if err != nil { log.Fatal(err) }
return
}
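// MustReshape is the log.Fatal-on-error wrapper around Reshape. Illustrative
// usage (a sketch; x is assumed to be an existing *Tensor; -1 lets the size of
// that dimension be inferred):
//
//	flat := x.MustReshape([]int64{-1}, false)
//	rows := x.MustReshape([]int64{2, -1}, false)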
func(ts *Tensor) MustReshape(shape []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Reshape(shape, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustReshapeAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ReshapeAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResize_(size []int64)() {
err := ts.Resize_(size)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustResizeAs_(theTemplate *Tensor)() {
err := ts.ResizeAs_(theTemplate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustResizeAsSparse_(theTemplate *Tensor)() {
err := ts.ResizeAsSparse_(theTemplate)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustResolveConj(del bool)(retVal *Tensor) {
retVal, err := ts.ResolveConj(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustResolveNeg(del bool)(retVal *Tensor) {
retVal, err := ts.ResolveNeg(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRetainsGrad(del bool)(retVal bool) {
retVal, err := ts.RetainsGrad(del)
if err != nil { log.Fatal(err) }
return retVal
}
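// MustRnnRelu is the log.Fatal-on-error wrapper around the package-level
// RnnRelu function. Illustrative usage (a sketch; input, hx and params are
// assumed to be tensors already shaped as the underlying libtorch RNN expects):
//
//	output, hn := MustRnnRelu(input, hx, params, true, 1, 0.0, true, false, true)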
func MustRnnRelu(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnRelu(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRnnReluData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnReluData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustRnnTanh(input *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnTanh(input, hx, params, hasBiases, numLayers, dropout, train, bidirectional, batchFirst)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) {
retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRnnTanhData(data *Tensor, batchSizes *Tensor, hx *Tensor, params []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := RnnTanhData(data, batchSizes, hx, params, hasBiases, numLayers, dropout, train, bidirectional)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
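// MustRoll is the log.Fatal-on-error wrapper around Roll. Illustrative usage
// (a sketch; x is assumed to be an existing *Tensor):
//
//	shifted := x.MustRoll([]int64{1}, []int64{0}, false) // roll by 1 along dim 0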
func(ts *Tensor) MustRoll(shifts []int64, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Roll(shifts, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRot90(k int64, dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Rot90(k, dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRound(del bool)(retVal *Tensor) {
retVal, err := ts.Round(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRound_()() {
err := ts.Round_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRoundOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RoundOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRowStack(tensors []Tensor)(retVal *Tensor) {
retVal, err := RowStack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustRowStackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
retVal, err := RowStackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRrelu(training bool, del bool)(retVal *Tensor) {
retVal, err := ts.Rrelu(training, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRrelu_(training bool)() {
err := ts.Rrelu_(training)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRreluWithNoise(noise *Tensor, training bool, del bool)(retVal *Tensor) {
retVal, err := ts.RreluWithNoise(noise, training, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRreluWithNoise_(noise *Tensor, training bool)() {
err := ts.RreluWithNoise_(noise, training)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor) {
retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool)(retVal *Tensor) {
retVal, err := ts.RreluWithNoiseOut(out, noise, training, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsqrt(del bool)(retVal *Tensor) {
retVal, err := ts.Rsqrt(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsqrt_()() {
err := ts.Rsqrt_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustRsqrtOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.RsqrtOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsub(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Rsub(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustRsubScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.RsubScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := ScalarTensor(s, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
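// MustScatter is the log.Fatal-on-error wrapper around Scatter. Illustrative
// usage (a sketch; x, index and src are assumed to be existing tensors, with
// index holding int64 positions along dim):
//
//	out := x.MustScatter(0, index, src, false) // out-of-place
//	x.MustScatter_(0, index, src)              // in-place variant defined below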
func(ts *Tensor) MustScatter(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Scatter(dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatter_(dim int64, index *Tensor, src *Tensor)() {
err := ts.Scatter_(dim, index, src)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterAdd(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterAdd(dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterAdd_(dim int64, index *Tensor, src *Tensor)() {
err := ts.ScatterAdd_(dim, index, src)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterAddOut(out, dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterReduce(dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterReduce(dim, index, src, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce string)() {
err := ts.ScatterReduce_(dim, index, src, reduce)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Tensor, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterReduceOut(out, dim, index, src, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterSrcOut(out, dim, index, src, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValue(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValue(dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValue_(dim int64, index *Tensor, value *Scalar)() {
err := ts.ScatterValue_(dim, index, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterValueOut(out *Tensor, dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValueOut(out, dim, index, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValueReduce(dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValueReduce(dim, index, value, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustScatterValueReduce_(dim int64, index *Tensor, value *Scalar, reduce string)() {
err := ts.ScatterValueReduce_(dim, index, value, reduce)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, value *Scalar, reduce string, del bool)(retVal *Tensor) {
retVal, err := ts.ScatterValueReduceOut(out, dim, index, value, reduce, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSearchsorted(sortedSequence *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
retVal, err := ts.Searchsorted(sortedSequence, outInt32, right, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSearchsortedScalar(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool)(retVal *Tensor) {
retVal, err := SearchsortedScalar(sortedSequence, selfScalar, outInt32, right)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSearchsortedTensorOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) {
retVal, err := ts.SearchsortedTensorOut(out, sortedSequence, outInt32, right, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor, axis int64, unsafety bool, initial *Scalar)(retVal *Tensor) {
retVal, err := SegmentReduce(data, reduce, lengths, indices, axis, unsafety, initial)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelect(dim int64, index int64, del bool)(retVal *Tensor) {
retVal, err := ts.Select(dim, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor) {
retVal, err := SelectBackward(gradOutput, inputSizes, dim, index)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelu(del bool)(retVal *Tensor) {
retVal, err := ts.Selu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSelu_()() {
err := ts.Selu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSet_()() {
err := ts.Set_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSetRequiresGrad(r bool, del bool)(retVal *Tensor) {
retVal, err := ts.SetRequiresGrad(r, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSetSourceTensor_(source *Tensor)() {
err := ts.SetSourceTensor_(source)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSgn(del bool)(retVal *Tensor) {
retVal, err := ts.Sgn(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSgn_()() {
err := ts.Sgn_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSgnOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SgnOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSigmoid(del bool)(retVal *Tensor) {
retVal, err := ts.Sigmoid(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSigmoid_()() {
err := ts.Sigmoid_()
if err != nil { log.Fatal(err) }
return
}
func MustSigmoidBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := SigmoidBackward(gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := SigmoidBackwardGradInput(gradInput, gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSigmoidOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SigmoidOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSign(del bool)(retVal *Tensor) {
retVal, err := ts.Sign(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSign_()() {
err := ts.Sign_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSignOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SignOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSignbit(del bool)(retVal *Tensor) {
retVal, err := ts.Signbit(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSignbitOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SignbitOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSilu(del bool)(retVal *Tensor) {
retVal, err := ts.Silu(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSilu_()() {
err := ts.Silu_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSiluBackward(gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SiluBackward(gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SiluBackwardGradInput(gradInput, gradOutput, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSiluOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SiluOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSin(del bool)(retVal *Tensor) {
retVal, err := ts.Sin(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSin_()() {
err := ts.Sin_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSinOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SinOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinc(del bool)(retVal *Tensor) {
retVal, err := ts.Sinc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinc_()() {
err := ts.Sinc_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSincOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SincOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinh(del bool)(retVal *Tensor) {
retVal, err := ts.Sinh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSinh_()() {
err := ts.Sinh_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSinhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SinhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlice(dim int64, start []int64, end []int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.Slice(dim, start, end, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor) {
retVal, err := SliceBackward(gradOutput, inputSizes, dim, start, end, step)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Slogdet(del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmm(mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Smm(mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1Loss(target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1LossBackwardGradInput(gradInput, gradOutput, target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) {
retVal, err := ts.SmoothL1LossOut(out, target, reduction, beta, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLoss(target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLossBackwardGradInput(gradInput, gradOutput, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) {
retVal, err := ts.SoftMarginLossOut(out, target, reduction, del)
if err != nil { log.Fatal(err) }
return retVal
}
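// MustSoftmax is the log.Fatal-on-error wrapper around Softmax. Illustrative
// usage (a sketch; logits is assumed to be an existing *Tensor, and gotch.Float
// is used only as an example dtype):
//
//	probs := logits.MustSoftmax(-1, gotch.Float, false) // softmax over the last dim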
func(ts *Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Softmax(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplus(del bool)(retVal *Tensor) {
retVal, err := ts.Softplus(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, output, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SoftplusBackwardGradInput(gradInput, gradOutput, beta, threshold, output, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftplusOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SoftplusOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrink(del bool)(retVal *Tensor) {
retVal, err := ts.Softshrink(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SoftshrinkBackwardGradInput(gradInput, gradOutput, lambd, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSoftshrinkOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SoftshrinkOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSolve(a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Solve(a, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSolveSolution(solution *Tensor, lu *Tensor, a *Tensor, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SolveSolution(solution, lu, a, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
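// MustSort is the log.Fatal-on-error wrapper around Sort. Illustrative usage
// (a sketch; x is assumed to be an existing *Tensor):
//
//	values, indices := x.MustSort(-1, true, false) // descending along the last dim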
func(ts *Tensor) MustSort(dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Sort(dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSortStable(stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SortStable(stable, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSortValues(values *Tensor, indices *Tensor, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SortValues(values, indices, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSortValuesStable(values *Tensor, indices *Tensor, stable bool, dim int64, descending bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SortValuesStable(values, indices, stable, dim, descending, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCooTensor(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCooTensorIndices(indices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCooTensorIndicesSize(indices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCsrTensor(crowIndices, colIndices, values, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := SparseCsrTensorCrowColValueSize(crowIndices, colIndices, values, size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseDim(del bool)(retVal int64) {
retVal, err := ts.SparseDim(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseMask(mask *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SparseMask(mask, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64)() {
err := ts.SparseResize_(size, sparseDim, denseDim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)() {
err := ts.SparseResizeAndClear_(size, sparseDim, denseDim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSpecialDigamma(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialDigamma(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialDigammaOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialDigammaOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialEntr(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialEntr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialEntrOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialEntrOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErf(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErf(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfc(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfcOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfcOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfcx(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfcx(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfcxOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfcxOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfinv(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfinv(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialErfinvOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialErfinvOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExp2(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExp2(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExp2Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExp2Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpit(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpit(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpitOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpitOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpm1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpm1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialExpm1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialExpm1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammainc(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammainc(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammaincOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammaincOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammaincc(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammaincc(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammainccOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammainccOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammaln(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammaln(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialGammalnOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialGammalnOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0e(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0e(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI0eOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI0eOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1Out(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1Out(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1e(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1e(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialI1eOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialI1eOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLog1p(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLog1p(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLog1pOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLog1pOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogSoftmax(dim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogit(eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogit(eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogitOut(out, eps, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogsumexp(dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialLogsumexpOut(out, dim, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialMultigammaln(p int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialMultigammaln(p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialMultigammalnOut(out *Tensor, p int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialMultigammalnOut(out, p, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtr(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtr(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtrOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtrOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtri(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtri(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialNdtriOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialNdtriOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPolygamma(n int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPolygamma(n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPolygammaOut(out, n, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPsi(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPsi(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialPsiOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialPsiOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialRound(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialRound(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialRoundOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialRoundOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialSinc(del bool)(retVal *Tensor) {
retVal, err := ts.SpecialSinc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialSincOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialSincOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1py(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1py(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1pyOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1pyOtherScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1pyOtherScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlog1pyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlog1pyOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlog1pySelfScalar(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlog1pySelfScalarOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogy(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogy(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogyOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogyOtherScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogyOtherScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialXlogyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialXlogyOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlogySelfScalar(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialXlogySelfScalarOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZeta(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZeta(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZetaOtherScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZetaOtherScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZetaOtherScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZetaOtherScalarOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSpecialZetaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SpecialZetaOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialZetaSelfScalar(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func MustSpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := SpecialZetaSelfScalarOut(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqrt(del bool)(retVal *Tensor) {
retVal, err := ts.Sqrt(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqrt_()() {
err := ts.Sqrt_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSqrtOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SqrtOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSquare(del bool)(retVal *Tensor) {
retVal, err := ts.Square(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSquare_()() {
err := ts.Square_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSquareOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SquareOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueeze(del bool)(retVal *Tensor) {
retVal, err := ts.Squeeze(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueeze_()() {
err := ts.Squeeze_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSqueezeDim(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.SqueezeDim(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSqueezeDim_(dim int64)() {
err := ts.SqueezeDim_(dim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSspaddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Sspaddmm(mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SspaddmmOut(out, mat1, mat2, del)
if err != nil { log.Fatal(err) }
return retVal
}
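// MustStack is the log.Fatal-on-error wrapper around the package-level Stack
// function. Illustrative usage (a sketch; a and b are assumed to be existing
// *Tensor values of identical shape; note the slice element type is Tensor,
// not *Tensor):
//
//	stacked := MustStack([]Tensor{*a, *b}, 0) // adds a leading dimension of size 2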
func MustStack(tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := Stack(tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustStackOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) {
retVal, err := StackOut(out, tensors, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStd(unbiased bool, del bool)(retVal *Tensor) {
retVal, err := ts.Std(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdCorrectionOut(out, dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStdMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.StdMean(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustStdMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.StdMeanCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustStdMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.StdMeanDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustStdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustStft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor) {
retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, returnComplex, del)
if err != nil { log.Fatal(err) }
return retVal
}
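// MustSub is the log.Fatal-on-error wrapper around Sub. Illustrative usage
// (a sketch; a and b are assumed to be existing tensors of broadcastable shapes):
//
//	diff := a.MustSub(b, false)
//	a.MustSub_(b) // in-place variant defined below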
func(ts *Tensor) MustSub(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Sub(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSub_(other *Tensor)() {
err := ts.Sub_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSubOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SubOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SubScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubScalar_(other *Scalar)() {
err := ts.SubScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSubtract(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Subtract(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubtract_(other *Tensor)() {
err := ts.Subtract_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSubtractOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.SubtractOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubtractScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.SubtractScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSubtractScalar_(other *Scalar)() {
err := ts.SubtractScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSum(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Sum(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSumDimIntlist(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SumDimIntlist(dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSumIntlistOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.SumIntlistOut(out, dim, keepdim, dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSumToSize(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.SumToSize(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
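// MustSvd is the log.Fatal-on-error wrapper around Svd. Illustrative usage
// (a sketch; m is assumed to be an existing 2-D *Tensor; with some=true the
// reduced decomposition is returned):
//
//	u, s, v := m.MustSvd(true, true, false)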
func(ts *Tensor) MustSvd(some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.Svd(some, computeUv, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustSvdU(u *Tensor, s *Tensor, v *Tensor, some bool, computeUv bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.SvdU(u, s, v, some, computeUv, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustSwapaxes(axis0 int64, axis1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Swapaxes(axis0, axis1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSwapaxes_(axis0 int64, axis1 int64)() {
err := ts.Swapaxes_(axis0, axis1)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSwapdims(dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Swapdims(dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustSwapdims_(dim0 int64, dim1 int64)() {
err := ts.Swapdims_(dim0, dim1)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustSymeig(eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Symeig(eigenvectors, upper, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustSymeigE(e *Tensor, v *Tensor, eigenvectors bool, upper bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.SymeigE(e, v, eigenvectors, upper, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustT(del bool)(retVal *Tensor) {
retVal, err := ts.T(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustT_()() {
err := ts.T_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTake(index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Take(index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTakeAlongDim(indices *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.TakeAlongDim(indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del bool)(retVal *Tensor) {
retVal, err := ts.TakeAlongDimOut(out, indices, dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTakeOut(out *Tensor, index *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TakeOut(out, index, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTan(del bool)(retVal *Tensor) {
retVal, err := ts.Tan(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTan_()() {
err := ts.Tan_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTanOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TanOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTanh(del bool)(retVal *Tensor) {
retVal, err := ts.Tanh(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTanh_()() {
err := ts.Tanh_()
if err != nil { log.Fatal(err) }
return
}
func MustTanhBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := TanhBackward(gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) {
retVal, err := TanhBackwardGradInput(gradInput, gradOutput, output)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTanhOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TanhOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) {
retVal, err := ts.TensordotOut(out, other, dimsSelf, dimsOther, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThreshold(threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.Threshold(threshold, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThreshold_(threshold *Scalar, value *Scalar)() {
err := ts.Threshold_(threshold, value)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ThresholdBackward(gradOutput, threshold, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ThresholdBackwardGradInput(gradInput, gradOutput, threshold, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.ThresholdOut(out, threshold, value, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTile(dims []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Tile(dims, del)
if err != nil { log.Fatal(err) }
return retVal
}
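// MustTo is the log.Fatal-on-error wrapper around To. Illustrative usage
// (a sketch; x is assumed to be an existing *Tensor, and gotch.CPU is used
// only as an example device):
//
//	onCpu := x.MustTo(gotch.CPU, false)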
func(ts *Tensor) MustTo(device gotch.Device, del bool)(retVal *Tensor) {
retVal, err := ts.To(device, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDense(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ToDense(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustToDenseBackward(grad *Tensor, input *Tensor)(retVal *Tensor) {
retVal, err := ToDenseBackward(grad, input)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDevice(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToDevice(device, dtype, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDtype(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToDtype(dtype, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToDtypeLayout(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToDtypeLayout(optionsKind, optionsDevice, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToMkldnn(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ToMkldnn(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustToMkldnnBackward(grad *Tensor, input *Tensor)(retVal *Tensor) {
retVal, err := ToMkldnnBackward(grad, input)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToOther(other *Tensor, nonBlocking bool, copy bool, del bool)(retVal *Tensor) {
retVal, err := ts.ToOther(other, nonBlocking, copy, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparse(del bool)(retVal *Tensor) {
retVal, err := ts.ToSparse(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustToSparseSparseDim(sparseDim int64, del bool)(retVal *Tensor) {
retVal, err := ts.ToSparseSparseDim(sparseDim, del)
if err != nil { log.Fatal(err) }
return retVal
}
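// MustTopk is the log.Fatal-on-error wrapper around Topk. Illustrative usage
// (a sketch; scores is assumed to be an existing *Tensor):
//
//	vals, idxs := scores.MustTopk(5, -1, true, true, false) // 5 largest, sorted, last dim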
func(ts *Tensor) MustTopk(k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.Topk(k, dim, largest, sorted, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTopkValues(values *Tensor, indices *Tensor, k int64, dim int64, largest bool, sorted bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.TopkValues(values, indices, k, dim, largest, sorted, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTotype(scalarType gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.Totype(scalarType, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrace(del bool)(retVal *Tensor) {
retVal, err := ts.Trace(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTraceBackward(grad *Tensor, sizes []int64)(retVal *Tensor) {
retVal, err := TraceBackward(grad, sizes)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) {
retVal, err := ts.Transpose(dim0, dim1, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTranspose_(dim0 int64, dim1 int64)() {
err := ts.Transpose_(dim0, dim1)
if err != nil { log.Fatal(err) }
return
}
func MustTrapezoid(y *Tensor, dim int64)(retVal *Tensor) {
retVal, err := Trapezoid(y, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) {
retVal, err := TrapezoidX(y, x, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrapz(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) {
retVal, err := Trapz(y, x, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTrapzDx(y *Tensor, dx float64, dim int64)(retVal *Tensor) {
retVal, err := TrapzDx(y, dx, dim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriangularSolve(a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.TriangularSolve(a, upper, transpose, unitriangular, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTriangularSolveX(x *Tensor, m *Tensor, a *Tensor, upper bool, transpose bool, unitriangular bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.TriangularSolveX(x, m, a, upper, transpose, unitriangular, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustTril(diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.Tril(diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTril_(diagonal int64)() {
err := ts.Tril_(diagonal)
if err != nil { log.Fatal(err) }
return
}
func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrilOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.TrilOut(out, diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustTripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal *Tensor) {
retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriu(diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.Triu(diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriu_(diagonal int64)() {
err := ts.Triu_(diagonal)
if err != nil { log.Fatal(err) }
return
}
func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTriuOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) {
retVal, err := ts.TriuOut(out, diagonal, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivide(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TrueDivide(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivide_(other *Tensor)() {
err := ts.TrueDivide_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTrueDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TrueDivideOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivideScalar(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.TrueDivideScalar(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrueDivideScalar_(other *Scalar)() {
err := ts.TrueDivideScalar_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTrunc(del bool)(retVal *Tensor) {
retVal, err := ts.Trunc(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTrunc_()() {
err := ts.Trunc_()
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustTruncOut(out *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TruncOut(out, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustTypeAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.TypeAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnflatten(dim int64, sizes []int64, del bool)(retVal *Tensor) {
retVal, err := ts.Unflatten(dim, sizes, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor) {
retVal, err := ts.Unfold(dimension, size, step, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor) {
retVal, err := UnfoldBackward(gradIn, inputSizes, dim, size, step)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUniform_(from float64, to float64)() {
err := ts.Uniform_(from, to)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustUniqueConsecutive(returnInverse bool, returnCounts bool, dim []int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueConsecutive(returnInverse, returnCounts, dim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUniqueDim(dim int64, sorted bool, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueDim(dim, sorted, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUniqueDimConsecutive(dim int64, returnInverse bool, returnCounts bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor) {
retVal0, retVal1, retVal2, err := ts.UniqueDimConsecutive(dim, returnInverse, returnCounts, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1, retVal2
}
func(ts *Tensor) MustUnsqueeze(dim int64, del bool)(retVal *Tensor) {
retVal, err := ts.Unsqueeze(dim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUnsqueeze_(dim int64)() {
err := ts.Unsqueeze_(dim)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBicubic2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBicubic2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleBilinear2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleBilinear2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleLinear1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleLinear1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest1d(outputSize, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest1dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest1dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scales)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest2dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest2dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleNearest3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleNearest3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func MustUpsampleTrilinear3dBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) {
retVal, err := UpsampleTrilinear3dBackwardGradInput(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustUpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) {
retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool)(retVal *Tensor) {
retVal, err := ValueSelectingReductionBackward(grad, dim, indices, sizes, keepdim)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustValues(del bool)(retVal *Tensor) {
retVal, err := ts.Values(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustVander(x *Tensor, n []int64, increasing bool)(retVal *Tensor) {
retVal, err := Vander(x, n, increasing)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVar(unbiased bool, del bool)(retVal *Tensor) {
retVal, err := ts.Var(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarCorrectionOut(out *Tensor, dim []int64, correction []int64, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarCorrectionOut(out, dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVarMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.VarMean(unbiased, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustVarMeanCorrection(dim []int64, correction []int64, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.VarMeanCorrection(dim, correction, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustVarMeanDim(dim []int64, unbiased bool, keepdim bool, del bool)(retVal0 *Tensor, retVal1 *Tensor) {
retVal0, retVal1, err := ts.VarMeanDim(dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal0, retVal1
}
func(ts *Tensor) MustVarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) {
retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVdot(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Vdot(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustVdotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.VdotOut(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustView(size []int64, del bool)(retVal *Tensor) {
retVal, err := ts.View(size, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAs(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.ViewAs(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsComplex(del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsComplex(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewAsReal(del bool)(retVal *Tensor) {
retVal, err := ts.ViewAsReal(del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustViewDtype(dtype gotch.DType, del bool)(retVal *Tensor) {
retVal, err := ts.ViewDtype(dtype, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustVstack(tensors []Tensor)(retVal *Tensor) {
retVal, err := Vstack(tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustVstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) {
retVal, err := VstackOut(out, tensors)
if err != nil { log.Fatal(err) }
return retVal
}
func MustWhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar)(retVal *Tensor) {
retVal, err := WhereScalar(condition, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustWhereScalarother(condition *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.WhereScalarother(condition, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustWhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := WhereScalarself(condition, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustWhereSelf(condition *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.WhereSelf(condition, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogy(other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.Xlogy(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogy_(other *Tensor)() {
err := ts.Xlogy_(other)
if err != nil { log.Fatal(err) }
return
}
func(ts *Tensor) MustXlogyOutscalarOther(out *Tensor, other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.XlogyOutscalarOther(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustXlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := XlogyOutscalarSelf(out, selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogyOuttensor(out *Tensor, other *Tensor, del bool)(retVal *Tensor) {
retVal, err := ts.XlogyOuttensor(out, other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogyScalarOther(other *Scalar, del bool)(retVal *Tensor) {
retVal, err := ts.XlogyScalarOther(other, del)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustXlogyScalarOther_(other *Scalar)() {
err := ts.XlogyScalarOther_(other)
if err != nil { log.Fatal(err) }
return
}
func MustXlogyScalarSelf(selfScalar *Scalar, other *Tensor)(retVal *Tensor) {
retVal, err := XlogyScalarSelf(selfScalar, other)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustZero_()() {
err := ts.Zero_()
if err != nil { log.Fatal(err) }
return
}
func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) {
retVal, err := Zeros(size, optionsKind, optionsDevice)
if err != nil { log.Fatal(err) }
return retVal
}
func(ts *Tensor) MustZerosLike(del bool)(retVal *Tensor) {
retVal, err := ts.ZerosLike(del)
if err != nil { log.Fatal(err) }
return retVal
}
func MustZerosOut(out *Tensor, size []int64)(retVal *Tensor) {
retVal, err := ZerosOut(out, size)
if err != nil { log.Fatal(err) }
return retVal
}
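// Usage sketch (illustrative only, not generated): each Must* wrapper forwards
// to its error-returning counterpart and calls log.Fatal on failure, so the
// process exits instead of surfacing an error. A minimal example, assuming
// gotch.Float and gotch.CPU as the desired dtype and device:
//
//	zs := MustZeros([]int64{2, 3}, gotch.Float, gotch.CPU) // exits on error
//	zs.MustZero_()                                         // in-place variant, exits on error
//
//	// Prefer the error-returning form when the caller needs to recover:
//	zs2, err := Zeros([]int64{2, 3}, gotch.Float, gotch.CPU)
//	if err != nil {
//		// handle the error instead of exiting
//	}
//	_ = zs2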
// End of implementing Tensor =================================