From 3cd8d8560fc54ef1cf55bf37b8c283faad33f8d4 Mon Sep 17 00:00:00 2001
From: sugarme
Date: Wed, 26 Jul 2023 23:19:38 +1000
Subject: [PATCH] generated newTensor() for GC collection

---
 gen/gen.ml             |   10 +-
 ts/tensor-generated.go | 7492 ++++++++++++++++++++++++++--------------
 2 files changed, 4983 insertions(+), 2519 deletions(-)

diff --git a/gen/gen.ml b/gen/gen.ml
index 6c8f441..bc72301 100644
--- a/gen/gen.ml
+++ b/gen/gen.ml
@@ -883,6 +883,7 @@ let write_wrapper funcs filename =
   pm "\n\n" ;
   pm "import(\n" ;
   pm "  \"unsafe\"\n" ;
+  pm "  \"fmt\"\n" ;
   pm "\n" ;
   pm "  \"github.com/sugarme/gotch\"\n" ;
   pm "  lib \"github.com/sugarme/gotch/libtch\"\n" ;
@@ -982,12 +983,13 @@
       pm "  %s" (Func.go_binding_body func) ;
       pm "  %s(ptr, %s)\n" cfunc_name (Func.go_binding_args func) ;
       pm "  if err = TorchErr(); err != nil {\n" ;
+      pm "    err = fmt.Errorf(\"%s() failed: %%w\", err)\n" gofunc_name;
       pm "    return %s\n" (Func.go_return_notype func ~fallible:true) ;
       pm "  }\n" ;
       (* NOTE. if in_place method, no retVal return *)
       if not (Func.is_inplace func) then
-        pm "  retVal = &Tensor{ctensor: *ptr}\n"
+        pm "  retVal = newTensor(*ptr, \"%s\")\n" gofunc_name
       else pm "  ts.ctensor = *ptr\n" ;
       pm "  \n" ;
       pm "  return %s\n" (Func.go_return_notype func ~fallible:true) ;
       pm "  }\n" ;
@@ -1024,13 +1026,14 @@
       pm "  %s(ctensorPtr0, %s)\n" cfunc_name (Func.go_binding_args func) ;
       pm "  if err = TorchErr(); err != nil {\n" ;
+      pm "    err = fmt.Errorf(\"%s() failed: %%w\", err)\n" gofunc_name;
       pm "    return %s\n" (Func.go_return_notype func ~fallible:true) ;
       pm "  }\n" ;
 
       (* NOTE. if in_place method, no retVal return *)
       if not (Func.is_inplace func) then
         for i = 0 to ntensors - 1 do
-          pm "  retVal%d = &Tensor{ctensor: *ctensorPtr%d}\n" i i
+          pm "  retVal%d = newTensor(*ctensorPtr%d, \"%s_%d\")\n" i i gofunc_name i
         done
       else pm "  ts.ctensor = *ptr\n" ;
       pm "  \n" ;
 
@@ -1052,6 +1055,7 @@
       pm "  %s" (Func.go_binding_body func) ;
       pm "  retVal = %s(%s)\n" cfunc_name (Func.go_binding_args func) ;
       pm "  if err = TorchErr(); err != nil {\n" ;
+      pm "    err = fmt.Errorf(\"%s() failed: %%w\", err)\n" gofunc_name;
       pm "    return %s\n" (Func.go_return_notype func ~fallible:true) ;
       pm "  }\n" ;
 
@@ -1073,6 +1077,7 @@
       pm "  %s" (Func.go_binding_body func) ;
       pm "  retVal = %s(%s)\n" cfunc_name (Func.go_binding_args func) ;
       pm "  if err = TorchErr(); err != nil {\n" ;
+      pm "    err = fmt.Errorf(\"%s() failed: %%w\", err)\n" gofunc_name;
       pm "    return %s\n" (Func.go_return_notype func ~fallible:true) ;
       pm "  }\n" ;
 
@@ -1094,6 +1099,7 @@
       pm "  %s" (Func.go_binding_body func) ;
       pm "  retVal = %s(%s)\n" cfunc_name (Func.go_binding_args func) ;
       pm "  if err = TorchErr(); err != nil {\n" ;
+      pm "    err = fmt.Errorf(\"%s() failed: %%w\", err)\n" gofunc_name;
       pm "    return %s\n" (Func.go_return_notype func ~fallible:true) ;
       pm "  }\n" ;
 
diff --git a/ts/tensor-generated.go b/ts/tensor-generated.go
index 831583d..1cf7c7c 100644
--- a/ts/tensor-generated.go
+++ b/ts/tensor-generated.go
@@ -7,6 +7,7 @@ import "C"
 
 import(
   "unsafe"
+  "fmt"
 
   "github.com/sugarme/gotch"
   lib "github.com/sugarme/gotch/libtch"
@@ -21,6 +22,7 @@ func(ts *Tensor) __And_(other *Scalar)(err error) {
 
   lib.Atg__And_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__And_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -36,6 +38,7 @@ func(ts *Tensor) __AndTensor_(other *Tensor)(err error) {
 
   lib.Atg__AndTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__AndTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -51,6 +54,7 @@ func(ts *Tensor) __Iand_(other *Scalar)(err error) {
 
   lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Iand_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -66,6 +70,7 @@ func(ts *Tensor) __IandTensor_(other *Tensor)(err error) {
 
   lib.Atg__IandTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__IandTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -81,6 +86,7 @@ func(ts *Tensor) __Ilshift_(other *Scalar)(err error) {
 
   lib.Atg__Ilshift_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Ilshift_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -96,6 +102,7 @@ func(ts *Tensor) __IlshiftTensor_(other *Tensor)(err error) {
 
   lib.Atg__IlshiftTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__IlshiftTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -111,6 +118,7 @@ func(ts *Tensor) __Ior_(other *Scalar)(err error) {
 
   lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Ior_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -126,6 +134,7 @@ func(ts *Tensor) __IorTensor_(other *Tensor)(err error) {
 
   lib.Atg__IorTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__IorTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -141,6 +150,7 @@ func(ts *Tensor) __Irshift_(other *Scalar)(err error) {
 
   lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Irshift_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -156,6 +166,7 @@ func(ts *Tensor) __IrshiftTensor_(other *Tensor)(err error) {
 
   lib.Atg__IrshiftTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__IrshiftTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -171,6 +182,7 @@ func(ts *Tensor) __Ixor_(other *Scalar)(err error) {
 
   lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Ixor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -186,6 +198,7 @@ func(ts *Tensor) __IxorTensor_(other *Tensor)(err error) {
 
   lib.Atg__IxorTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__IxorTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -201,6 +214,7 @@ func(ts *Tensor) __Lshift_(other *Scalar)(err error) {
 
   lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Lshift_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -217,9 +231,10 @@ func(ts *Tensor) __LshiftScalarOut_(out *Tensor, other *Scalar, del bool)(retVal
 
   lib.Atg__LshiftScalarOut_(ptr, out.ctensor, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__LshiftScalarOut_() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "__LshiftScalarOut_")
 
   return retVal, err
 }
@@ -232,6 +247,7 @@ func(ts *Tensor) __LshiftTensor_(other *Tensor)(err error) {
 
   lib.Atg__LshiftTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__LshiftTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -248,9 +264,10 @@ func(ts *Tensor) __LshiftTensorOut_(out *Tensor, other *Tensor, del bool)(retVal
 
   lib.Atg__LshiftTensorOut_(ptr, out.ctensor, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__LshiftTensorOut_() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "__LshiftTensorOut_")
 
   return retVal, err
 }
@@ -263,6 +280,7 @@ func(ts *Tensor) __Or_(other *Scalar)(err error) {
 
   lib.Atg__Or_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Or_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -278,6 +296,7 @@ func(ts *Tensor) __OrTensor_(other *Tensor)(err error) {
 
   lib.Atg__OrTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__OrTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -293,6 +312,7 @@ func(ts *Tensor) __Rshift_(other *Scalar)(err error) {
 
   lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Rshift_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -309,9 +329,10 @@ func(ts *Tensor) __RshiftScalarOut_(out *Tensor, other *Scalar, del bool)(retVal
 
   lib.Atg__RshiftScalarOut_(ptr, out.ctensor, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__RshiftScalarOut_() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "__RshiftScalarOut_")
 
   return retVal, err
 }
@@ -324,6 +345,7 @@ func(ts *Tensor) __RshiftTensor_(other *Tensor)(err error) {
 
   lib.Atg__RshiftTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__RshiftTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -340,9 +362,10 @@ func(ts *Tensor) __RshiftTensorOut_(out *Tensor, other *Tensor, del bool)(retVal
 
   lib.Atg__RshiftTensorOut_(ptr, out.ctensor, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__RshiftTensorOut_() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "__RshiftTensorOut_")
 
   return retVal, err
 }
@@ -355,6 +378,7 @@ func(ts *Tensor) __Xor_(other *Scalar)(err error) {
 
   lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__Xor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -370,6 +394,7 @@ func(ts *Tensor) __XorTensor_(other *Tensor)(err error) {
 
   lib.Atg__XorTensor_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("__XorTensor_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -387,9 +412,10 @@ func(ts *Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor
 outputSizeLen := len(outputSize)
   lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, outputSizeLen)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool2d() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool2d")
 
   return retVal, err
 }
@@ -403,9 +429,10 @@ func(ts *Tensor) _AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal
 
   lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool2dBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool2dBackward")
 
   return retVal, err
 }
@@ -419,9 +446,10 @@ func(ts *Tensor) _AdaptiveAvgPool2dBackwardOut(out *Tensor, gradOutput *Tensor,
 
   lib.Atg_AdaptiveAvgPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool2dBackwardOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool2dBackwardOut")
 
   return retVal, err
 }
@@ -436,9 +464,10 @@ func(ts *Tensor) _AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool
 outputSizeLen := len(outputSize)
   lib.Atg_AdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool2dOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool2dOut")
 
   return retVal, err
 }
@@ -453,9 +482,10 @@ func(ts *Tensor) _AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor
 outputSizeLen := len(outputSize)
   lib.Atg_AdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, outputSizeLen)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool3d() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool3d")
 
   return retVal, err
 }
@@ -469,9 +499,10 @@ func(ts *Tensor) _AdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool)(retVal
 
   lib.Atg_AdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool3dBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool3dBackward")
 
   return retVal, err
 }
@@ -485,9 +516,10 @@ func(ts *Tensor) _AdaptiveAvgPool3dBackwardOut(out *Tensor, gradOutput *Tensor,
 
   lib.Atg_AdaptiveAvgPool3dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool3dBackwardOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool3dBackwardOut")
 
   return retVal, err
 }
@@ -502,9 +534,10 @@ func(ts *Tensor) _AdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool
 outputSizeLen := len(outputSize)
   lib.Atg_AdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AdaptiveAvgPool3dOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AdaptiveAvgPool3dOut")
 
   return retVal, err
 }
@@ -518,9 +551,10 @@ func(ts *Tensor) _AddBatchDim(batchDim int64, level int64, del bool)(retVal *Ten
 
   lib.Atg_AddBatchDim(ptr, ts.ctensor, batchDim, level)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddBatchDim() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AddBatchDim")
 
   return retVal, err
 }
@@ -534,9 +568,10 @@ func(ts *Tensor) _AddRelu(other *Tensor, del bool)(retVal *Tensor, err error) {
 
   lib.Atg_AddRelu(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddRelu() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AddRelu")
 
   return retVal, err
 }
@@ -549,6 +584,7 @@ func(ts *Tensor) _AddRelu_(other *Tensor)(err error) {
 
   lib.Atg_AddRelu_(ptr, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddRelu_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -565,9 +601,10 @@ func(ts *Tensor) _AddReluOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso
 
   lib.Atg_AddReluOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddReluOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AddReluOut")
 
   return retVal, err
 }
@@ -581,9 +618,10 @@ func(ts *Tensor) _AddReluScalar(other *Scalar, del bool)(retVal *Tensor, err err
 
   lib.Atg_AddReluScalar(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddReluScalar() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AddReluScalar")
 
   return retVal, err
 }
@@ -596,6 +634,7 @@ func(ts *Tensor) _AddReluScalar_(other *Scalar)(err error) {
 
   lib.Atg_AddReluScalar_(ptr, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddReluScalar_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -612,9 +651,10 @@ func(ts *Tensor) _AddReluScalarOut(out *Tensor, other *Scalar, del bool)(retVal
 
   lib.Atg_AddReluScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddReluScalarOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AddReluScalarOut")
 
   return retVal, err
 }
@@ -630,9 +670,10 @@ func(ts *Tensor) _AddmmActivation(mat1 *Tensor, mat2 *Tensor, useGelu bool, del
 if useGelu { cuseGelu = int32(1) }
   lib.Atg_AddmmActivation(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor, cuseGelu)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddmmActivation() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AddmmActivation")
 
   return retVal, err
 }
@@ -648,9 +689,10 @@ func(ts *Tensor) _AddmmActivationOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, us
 if useGelu { cuseGelu = int32(1) }
   lib.Atg_AddmmActivationOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor, cuseGelu)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AddmmActivationOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AddmmActivationOut")
 
   return retVal, err
 }
@@ -664,10 +706,11 @@ func(ts *Tensor) _Aminmax(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error)
 
   lib.Atg_Aminmax(ctensorPtr0, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Aminmax() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_Aminmax_0")
+  retVal1 = newTensor(*ctensorPtr1, "_Aminmax_1")
 
   return retVal0, retVal1, err
 }
@@ -683,10 +726,11 @@ func(ts *Tensor) _AminmaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor,
 if keepdim { ckeepdim = int32(1) }
   lib.Atg_AminmaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AminmaxDim() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_AminmaxDim_0")
+  retVal1 = newTensor(*ctensorPtr1, "_AminmaxDim_1")
 
   return retVal0, retVal1, err
 }
@@ -702,10 +746,11 @@ func(ts *Tensor) _AminmaxDimOut(out0 *Tensor, out1 *Tensor, dim int64, keepdim b
 if keepdim { ckeepdim = int32(1) }
   lib.Atg_AminmaxDimOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, dim, ckeepdim)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AminmaxDimOut() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_AminmaxDimOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_AminmaxDimOut_1")
 
   return retVal0, retVal1, err
 }
@@ -719,10 +764,11 @@ func(ts *Tensor) _AminmaxOut(out0 *Tensor, out1 *Tensor, del bool)(retVal0 *Tens
 
   lib.Atg_AminmaxOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AminmaxOut() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_AminmaxOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_AminmaxOut_1")
 
   return retVal0, retVal1, err
 }
@@ -736,10 +782,11 @@ func(ts *Tensor) _AmpUpdateScale(growthTracker *Tensor, foundInf *Tensor, scaleG
 
   lib.Atg_AmpUpdateScale(ctensorPtr0, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AmpUpdateScale() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_AmpUpdateScale_0")
+  retVal1 = newTensor(*ctensorPtr1, "_AmpUpdateScale_1")
 
   return retVal0, retVal1, err
 }
@@ -752,6 +799,7 @@ func(ts *Tensor) _AmpUpdateScale_(growthTracker *Tensor, foundInf *Tensor, scale
 
   lib.Atg_AmpUpdateScale_(ptr, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AmpUpdateScale_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -768,9 +816,10 @@ func(ts *Tensor) _AmpUpdateScaleOut(out *Tensor, growthTracker *Tensor, foundInf
 
   lib.Atg_AmpUpdateScaleOut(ptr, out.ctensor, ts.ctensor, growthTracker.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AmpUpdateScaleOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AmpUpdateScaleOut")
 
   return retVal, err
 }
@@ -788,9 +837,10 @@ ccpuEnabled := int32(0)
 if cpuEnabled { ccpuEnabled = int32(1) }
   lib.Atg_AutocastToFullPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AutocastToFullPrecision() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AutocastToFullPrecision")
 
   return retVal, err
 }
@@ -808,9 +858,10 @@ ccpuEnabled := int32(0)
 if cpuEnabled { ccpuEnabled = int32(1) }
   lib.Atg_AutocastToReducedPrecision(ptr, ts.ctensor, ccudaEnabled, ccpuEnabled, cudaDtype.CInt(), cpuDtype.CInt())
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_AutocastToReducedPrecision() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_AutocastToReducedPrecision")
 
   return retVal, err
 }
@@ -826,9 +877,10 @@ func(ts *Tensor) _CastByte(nonBlocking bool, del bool)(retVal *Tensor, err error
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastByte() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastByte")
 
   return retVal, err
 }
@@ -844,9 +896,10 @@ func(ts *Tensor) _CastChar(nonBlocking bool, del bool)(retVal *Tensor, err error
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastChar() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastChar")
 
   return retVal, err
 }
@@ -862,9 +915,10 @@ func(ts *Tensor) _CastDouble(nonBlocking bool, del bool)(retVal *Tensor, err err
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastDouble() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastDouble")
 
   return retVal, err
 }
@@ -880,9 +934,10 @@ func(ts *Tensor) _CastFloat(nonBlocking bool, del bool)(retVal *Tensor, err erro
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastFloat() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastFloat")
 
   return retVal, err
 }
@@ -898,9 +953,10 @@ func(ts *Tensor) _CastHalf(nonBlocking bool, del bool)(retVal *Tensor, err error
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastHalf() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastHalf")
 
   return retVal, err
 }
@@ -916,9 +972,10 @@ func(ts *Tensor) _CastInt(nonBlocking bool, del bool)(retVal *Tensor, err error)
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastInt() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastInt")
 
   return retVal, err
 }
@@ -934,9 +991,10 @@ func(ts *Tensor) _CastLong(nonBlocking bool, del bool)(retVal *Tensor, err error
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastLong() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastLong")
 
   return retVal, err
 }
@@ -952,9 +1010,10 @@ func(ts *Tensor) _CastShort(nonBlocking bool, del bool)(retVal *Tensor, err erro
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CastShort() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CastShort")
 
   return retVal, err
 }
@@ -967,9 +1026,10 @@ func _CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tens
 
   lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CdistBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CdistBackward")
 
   return retVal, err
 }
@@ -982,9 +1042,10 @@ func _CdistBackwardOut(out *Tensor, grad *Tensor, x1 *Tensor, x2 *Tensor, p floa
 
   lib.Atg_CdistBackwardOut(ptr, out.ctensor, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CdistBackwardOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CdistBackwardOut")
 
   return retVal, err
 }
@@ -1000,9 +1061,10 @@ func(ts *Tensor) _CholeskySolveHelper(a *Tensor, upper bool, del bool)(retVal *T
 if upper { cupper = int32(1) }
   lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CholeskySolveHelper() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CholeskySolveHelper")
 
   return retVal, err
 }
@@ -1018,9 +1080,10 @@ func(ts *Tensor) _CholeskySolveHelperOut(out *Tensor, a *Tensor, upper bool, del
 if upper { cupper = int32(1) }
   lib.Atg_CholeskySolveHelperOut(ptr, out.ctensor, ts.ctensor, a.ctensor, cupper)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CholeskySolveHelperOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CholeskySolveHelperOut")
 
   return retVal, err
 }
@@ -1033,6 +1096,7 @@ func _ChunkGradOutputsEfficientAttention(query *Tensor, key *Tensor, value *Tens
 if isCausal { cisCausal = int32(1) }
   retVal = lib.Atg_ChunkGradOutputsEfficientAttention(query.ctensor, key.ctensor, value.ctensor, cisCausal)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ChunkGradOutputsEfficientAttention() failed: %w", err)
     return retVal, err
   }
   return retVal, err
@@ -1047,9 +1111,10 @@ func(ts *Tensor) _Coalesce(del bool)(retVal *Tensor, err error) {
 
   lib.Atg_Coalesce(ptr, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Coalesce() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_Coalesce")
 
   return retVal, err
 }
@@ -1063,9 +1128,10 @@ func(ts *Tensor) _CoalesceOut(out *Tensor, del bool)(retVal *Tensor, err error)
 
   lib.Atg_CoalesceOut(ptr, out.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CoalesceOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CoalesceOut")
 
   return retVal, err
 }
@@ -1081,9 +1147,10 @@ func(ts *Tensor) _Coalesced(coalesced bool, del bool)(retVal *Tensor, err error)
 if coalesced { ccoalesced = int32(1) }
   lib.Atg_Coalesced(ptr, ts.ctensor, ccoalesced)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Coalesced() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_Coalesced")
 
   return retVal, err
 }
@@ -1098,6 +1165,7 @@ func(ts *Tensor) _Coalesced_(coalesced bool)(err error) {
 if coalesced { ccoalesced = int32(1) }
   lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Coalesced_() failed: %w", err)
     return err
   }
   ts.ctensor = *ptr
@@ -1116,9 +1184,10 @@ func(ts *Tensor) _CoalescedOut(out *Tensor, coalesced bool, del bool)(retVal *Te
 if coalesced { ccoalesced = int32(1) }
   lib.Atg_CoalescedOut(ptr, out.ctensor, ts.ctensor, ccoalesced)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CoalescedOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CoalescedOut")
 
   return retVal, err
 }
@@ -1131,9 +1200,10 @@ func _ComputeLinearCombination(input *Tensor, coefficients *Tensor)(retVal *Tens
 
   lib.Atg_ComputeLinearCombination(ptr, input.ctensor, coefficients.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ComputeLinearCombination() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ComputeLinearCombination")
 
   return retVal, err
 }
@@ -1146,9 +1216,10 @@ func _ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tens
 
   lib.Atg_ComputeLinearCombinationOut(ptr, out.ctensor, input.ctensor, coefficients.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ComputeLinearCombinationOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ComputeLinearCombinationOut")
 
   return retVal, err
 }
@@ -1162,9 +1233,10 @@ func(ts *Tensor) _Conj(del bool)(retVal *Tensor, err error) {
 
   lib.Atg_Conj(ptr, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Conj() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_Conj")
 
   return retVal, err
 }
@@ -1178,9 +1250,10 @@ func(ts *Tensor) _ConjCopy(del bool)(retVal *Tensor, err error) {
 
   lib.Atg_ConjCopy(ptr, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConjCopy() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConjCopy")
 
   return retVal, err
 }
@@ -1194,9 +1267,10 @@ func(ts *Tensor) _ConjCopyOut(out *Tensor, del bool)(retVal *Tensor, err error)
 
   lib.Atg_ConjCopyOut(ptr, out.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConjCopyOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConjCopyOut")
 
   return retVal, err
 }
@@ -1210,9 +1284,10 @@ func(ts *Tensor) _ConjPhysical(del bool)(retVal *Tensor, err error) {
 
   lib.Atg_ConjPhysical(ptr, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConjPhysical() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConjPhysical")
 
   return retVal, err
 }
@@ -1226,9 +1301,10 @@ func(ts *Tensor) _ConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor, err err
 
   lib.Atg_ConjPhysicalOut(ptr, out.ctensor, ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConjPhysicalOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConjPhysicalOut")
 
   return retVal, err
 }
@@ -1246,9 +1322,10 @@ paddingLen := len(padding)
 dilationLen := len(dilation)
   lib.Atg_ConvDepthwise2d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvDepthwise2d() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvDepthwise2d")
 
   return retVal, err
 }
@@ -1266,9 +1343,10 @@ paddingLen := len(padding)
 dilationLen := len(dilation)
   lib.Atg_ConvDepthwise2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvDepthwise2dOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvDepthwise2dOut")
 
   return retVal, err
 }
@@ -1284,9 +1362,10 @@ func(ts *Tensor) _ConvertIndicesFromCooToCsr(size int64, outInt32 bool, del bool
 if outInt32 { coutInt32 = int32(1) }
   lib.Atg_ConvertIndicesFromCooToCsr(ptr, ts.ctensor, size, coutInt32)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvertIndicesFromCooToCsr() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvertIndicesFromCooToCsr")
 
   return retVal, err
 }
@@ -1302,9 +1381,10 @@ func(ts *Tensor) _ConvertIndicesFromCooToCsrOut(out *Tensor, size int64, outInt3
 if outInt32 { coutInt32 = int32(1) }
   lib.Atg_ConvertIndicesFromCooToCsrOut(ptr, out.ctensor, ts.ctensor, size, coutInt32)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvertIndicesFromCooToCsrOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvertIndicesFromCooToCsrOut")
 
   return retVal, err
 }
@@ -1321,9 +1401,10 @@ ctranspose := int32(0)
 if transpose { ctranspose = int32(1) }
   lib.Atg_ConvertIndicesFromCsrToCoo(ptr, crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvertIndicesFromCsrToCoo() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvertIndicesFromCsrToCoo")
 
   return retVal, err
 }
@@ -1340,9 +1421,10 @@ ctranspose := int32(0)
 if transpose { ctranspose = int32(1) }
   lib.Atg_ConvertIndicesFromCsrToCooOut(ptr, out.ctensor, crowIndices.ctensor, colIndices.ctensor, coutInt32, ctranspose)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvertIndicesFromCsrToCooOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvertIndicesFromCsrToCooOut")
 
   return retVal, err
 }
@@ -1369,9 +1451,10 @@ callowTf32 := int32(0)
 if allowTf32 { callowTf32 = int32(1) }
   lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Convolution() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_Convolution")
 
   return retVal, err
 }
@@ -1396,9 +1479,10 @@ ccudnnEnabled := int32(0)
 if cudnnEnabled { ccudnnEnabled = int32(1) }
   lib.Atg_ConvolutionDeprecated(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups, cbenchmark, cdeterministic, ccudnnEnabled)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvolutionDeprecated() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvolutionDeprecated")
 
   return retVal, err
 }
@@ -1413,9 +1497,10 @@ func _ConvolutionMode(input *Tensor, weight *Tensor, bias *Tensor, stride []int6
 dilationLen := len(dilation)
   lib.Atg_ConvolutionMode(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvolutionMode() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvolutionMode")
 
   return retVal, err
 }
@@ -1442,9 +1527,10 @@ callowTf32 := int32(0)
 if allowTf32 { callowTf32 = int32(1) }
   lib.Atg_ConvolutionOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_ConvolutionOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_ConvolutionOut")
 
   return retVal, err
 }
@@ -1460,9 +1546,10 @@ func(ts *Tensor) _CopyFrom(dst *Tensor, nonBlocking bool, del bool)(retVal *Tens
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CopyFrom(ptr, ts.ctensor, dst.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CopyFrom() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CopyFrom")
 
   return retVal, err
 }
@@ -1476,9 +1563,10 @@ func(ts *Tensor) _CopyFromAndResize(dst *Tensor, del bool)(retVal *Tensor, err e
 
   lib.Atg_CopyFromAndResize(ptr, ts.ctensor, dst.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CopyFromAndResize() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CopyFromAndResize")
 
   return retVal, err
 }
@@ -1492,9 +1580,10 @@ func(ts *Tensor) _CopyFromAndResizeOut(out *Tensor, dst *Tensor, del bool)(retVa
 
   lib.Atg_CopyFromAndResizeOut(ptr, out.ctensor, ts.ctensor, dst.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CopyFromAndResizeOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CopyFromAndResizeOut")
 
   return retVal, err
 }
@@ -1510,9 +1599,10 @@ func(ts *Tensor) _CopyFromOut(out *Tensor, dst *Tensor, nonBlocking bool, del bo
 if nonBlocking { cnonBlocking = int32(1) }
   lib.Atg_CopyFromOut(ptr, out.ctensor, ts.ctensor, dst.ctensor, cnonBlocking)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CopyFromOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CopyFromOut")
 
   return retVal, err
 }
@@ -1529,10 +1619,11 @@ czeroInfinity := int32(0)
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CtcLoss() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_CtcLoss_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CtcLoss_1")
 
   return retVal0, retVal1, err
 }
@@ -1549,9 +1640,10 @@ czeroInfinity := int32(0)
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CtcLossBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CtcLossBackward")
 
   return retVal, err
 }
@@ -1568,9 +1660,10 @@ czeroInfinity := int32(0)
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CtcLossBackwardOut(ptr, out.ctensor, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CtcLossBackwardOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CtcLossBackwardOut")
 
   return retVal, err
 }
@@ -1585,9 +1678,10 @@ func _CtcLossBackwardTensor(grad *Tensor, logProbs *Tensor, targets *Tensor, inp
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CtcLossBackwardTensor(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CtcLossBackwardTensor() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CtcLossBackwardTensor")
 
   return retVal, err
 }
@@ -1604,10 +1698,11 @@ czeroInfinity := int32(0)
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CtcLossOut(ctensorPtr0, out0.ctensor, out1.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CtcLossOut() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_CtcLossOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CtcLossOut_1")
 
   return retVal0, retVal1, err
 }
@@ -1622,10 +1717,11 @@ func _CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, tar
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CtcLossTensor(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CtcLossTensor() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_CtcLossTensor_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CtcLossTensor_1")
 
   return retVal0, retVal1, err
 }
@@ -1640,10 +1736,11 @@ func _CtcLossTensorOut(out0 *Tensor, out1 *Tensor, logProbs *Tensor, targets *Te
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CtcLossTensorOut(ctensorPtr0, out0.ctensor, out1.ctensor, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CtcLossTensorOut() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_CtcLossTensorOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CtcLossTensorOut_1")
 
   return retVal0, retVal1, err
 }
@@ -1662,10 +1759,11 @@ czeroInfinity := int32(0)
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CudnnCtcLoss(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, cdeterministic, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnCtcLoss() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_CudnnCtcLoss_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CudnnCtcLoss_1")
 
   return retVal0, retVal1, err
 }
@@ -1684,10 +1782,11 @@ czeroInfinity := int32(0)
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CudnnCtcLossOut(ctensorPtr0, out0.ctensor, out1.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, cdeterministic, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnCtcLossOut() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_CudnnCtcLossOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CudnnCtcLossOut_1")
 
   return retVal0, retVal1, err
 }
@@ -1704,10 +1803,11 @@ czeroInfinity := int32(0)
 if zeroInfinity { czeroInfinity = int32(1) }
   lib.Atg_CudnnCtcLossTensor(ctensorPtr0, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, cdeterministic, czeroInfinity)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnCtcLossTensor() failed: %w", err)
     return retVal0, retVal1, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
+  retVal0 = newTensor(*ctensorPtr0, "_CudnnCtcLossTensor_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CudnnCtcLossTensor_1")
 
   return retVal0, retVal1, err
 }
@@ -1722,9 +1822,10 @@ func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, opti
 if train { ctrain = int32(1) }
   lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt())
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnInitDropoutState() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CudnnInitDropoutState")
 
   return retVal, err
 }
@@ -1739,9 +1840,10 @@ func _CudnnInitDropoutStateOut(out *Tensor, dropout float64, train bool, dropout
 if train { ctrain = int32(1) }
   lib.Atg_CudnnInitDropoutStateOut(ptr, out.ctensor, dropout, ctrain, dropoutSeed)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnInitDropoutStateOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CudnnInitDropoutStateOut")
 
   return retVal, err
 }
@@ -1766,13 +1868,14 @@ cbidirectional := int32(0)
 batchSizesLen := len(batchSizes)
   lib.Atg_CudnnRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, weightBuf.ctensor, hx.ctensor, cx.ctensor, mode, hiddenSize, projSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnRnn() failed: %w", err)
     return retVal0, retVal1, retVal2, retVal3, retVal4, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
-  retVal2 = &Tensor{ctensor: *ctensorPtr2}
-  retVal3 = &Tensor{ctensor: *ctensorPtr3}
-  retVal4 = &Tensor{ctensor: *ctensorPtr4}
+  retVal0 = newTensor(*ctensorPtr0, "_CudnnRnn_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CudnnRnn_1")
+  retVal2 = newTensor(*ctensorPtr2, "_CudnnRnn_2")
+  retVal3 = newTensor(*ctensorPtr3, "_CudnnRnn_3")
+  retVal4 = newTensor(*ctensorPtr4, "_CudnnRnn_4")
 
   return retVal0, retVal1, retVal2, retVal3, retVal4, err
 }
@@ -1791,9 +1894,10 @@ cbidirectional := int32(0)
 if bidirectional { cbidirectional = int32(1) }
   lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, cbatchFirst, cbidirectional)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnRnnFlattenWeight() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CudnnRnnFlattenWeight")
 
   return retVal, err
 }
@@ -1812,9 +1916,10 @@ cbidirectional := int32(0)
 if bidirectional { cbidirectional = int32(1) }
   lib.Atg_CudnnRnnFlattenWeightOut(ptr, out.ctensor, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, projSize, numLayers, cbatchFirst, cbidirectional)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnRnnFlattenWeightOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_CudnnRnnFlattenWeightOut")
 
   return retVal, err
 }
@@ -1839,13 +1944,14 @@ cbidirectional := int32(0)
 batchSizesLen := len(batchSizes)
   lib.Atg_CudnnRnnOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, input.ctensor, cweight, len(cweight), weightStride0, weightBuf.ctensor, hx.ctensor, cx.ctensor, mode, hiddenSize, projSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CudnnRnnOut() failed: %w", err)
     return retVal0, retVal1, retVal2, retVal3, retVal4, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
-  retVal2 = &Tensor{ctensor: *ctensorPtr2}
-  retVal3 = &Tensor{ctensor: *ctensorPtr3}
-  retVal4 = &Tensor{ctensor: *ctensorPtr4}
+  retVal0 = newTensor(*ctensorPtr0, "_CudnnRnnOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_CudnnRnnOut_1")
+  retVal2 = newTensor(*ctensorPtr2, "_CudnnRnnOut_2")
+  retVal3 = newTensor(*ctensorPtr3, "_CudnnRnnOut_3")
+  retVal4 = newTensor(*ctensorPtr4, "_CudnnRnnOut_4")
 
   return retVal0, retVal1, retVal2, retVal3, retVal4, err
 }
@@ -1856,6 +1962,7 @@ func _CufftGetPlanCacheMaxSize(deviceIndex int64)(retVal int64, err error) {
   retVal = lib.Atg_CufftGetPlanCacheMaxSize(deviceIndex)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CufftGetPlanCacheMaxSize() failed: %w", err)
     return retVal, err
   }
   return retVal, err
 }
@@ -1867,6 +1974,7 @@ func _CufftGetPlanCacheSize(deviceIndex int64)(retVal int64, err error) {
   retVal = lib.Atg_CufftGetPlanCacheSize(deviceIndex)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_CufftGetPlanCacheSize() failed: %w", err)
     return retVal, err
   }
   return retVal, err
 }
@@ -1879,6 +1987,7 @@ func(ts *Tensor) _DebugHasInternalOverlap(del bool)(retVal int64, err error) {
 
   retVal = lib.Atg_DebugHasInternalOverlap(ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_DebugHasInternalOverlap() failed: %w", err)
     return retVal, err
   }
   return retVal, err
@@ -1892,9 +2001,10 @@ func _DimArange(like *Tensor, dim int64)(retVal *Tensor, err error) {
 
   lib.Atg_DimArange(ptr, like.ctensor, dim)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_DimArange() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_DimArange")
 
   return retVal, err
 }
@@ -1906,6 +2016,7 @@ func(ts *Tensor) _Dimi(del bool)(retVal int64, err error) {
 
   retVal = lib.Atg_Dimi(ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Dimi() failed: %w", err)
     return retVal, err
   }
   return retVal, err
@@ -1918,6 +2029,7 @@ func(ts *Tensor) _Dimv(del bool)(retVal int64, err error) {
 
   retVal = lib.Atg_Dimv(ts.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Dimv() failed: %w", err)
     return retVal, err
   }
   return retVal, err
@@ -1931,9 +2043,10 @@ func _DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor)(retVal *Tensor, err
 
   lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_DirichletGrad() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_DirichletGrad")
 
   return retVal, err
 }
@@ -1946,9 +2059,10 @@ func _DirichletGradOut(out *Tensor, x *Tensor, alpha *Tensor, total *Tensor)(ret
 
   lib.Atg_DirichletGradOut(ptr, out.ctensor, x.ctensor, alpha.ctensor, total.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_DirichletGradOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_DirichletGradOut")
 
   return retVal, err
 }
@@ -1966,11 +2080,12 @@ cchunkGradOutputs := int32(0)
 if chunkGradOutputs { cchunkGradOutputs = int32(1) }
   lib.Atg_EfficientAttentionBackward(ctensorPtr0, gradOut_.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cisCausal, cchunkGradOutputs)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EfficientAttentionBackward() failed: %w", err)
     return retVal0, retVal1, retVal2, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
-  retVal2 = &Tensor{ctensor: *ctensorPtr2}
+  retVal0 = newTensor(*ctensorPtr0, "_EfficientAttentionBackward_0")
+  retVal1 = newTensor(*ctensorPtr1, "_EfficientAttentionBackward_1")
+  retVal2 = newTensor(*ctensorPtr2, "_EfficientAttentionBackward_2")
 
   return retVal0, retVal1, retVal2, err
 }
@@ -1984,9 +2099,10 @@ func _Efficientzerotensor(size []int64, optionsKind gotch.DType, optionsDevice g
 sizeLen := len(size)
   lib.Atg_Efficientzerotensor(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_Efficientzerotensor() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_Efficientzerotensor")
 
   return retVal, err
 }
@@ -2000,9 +2116,10 @@ func _EfficientzerotensorOut(out *Tensor, size []int64)(retVal *Tensor, err erro
 sizeLen := len(size)
   lib.Atg_EfficientzerotensorOut(ptr, out.ctensor, size, sizeLen)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EfficientzerotensorOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EfficientzerotensorOut")
 
   return retVal, err
 }
@@ -2023,12 +2140,13 @@ cincludeLastOffset := int32(0)
 if includeLastOffset { cincludeLastOffset = int32(1) }
   lib.Atg_EmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBag() failed: %w", err)
     return retVal0, retVal1, retVal2, retVal3, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
-  retVal2 = &Tensor{ctensor: *ctensorPtr2}
-  retVal3 = &Tensor{ctensor: *ctensorPtr3}
+  retVal0 = newTensor(*ctensorPtr0, "_EmbeddingBag_0")
+  retVal1 = newTensor(*ctensorPtr1, "_EmbeddingBag_1")
+  retVal2 = newTensor(*ctensorPtr2, "_EmbeddingBag_2")
+  retVal3 = newTensor(*ctensorPtr3, "_EmbeddingBag_3")
 
   return retVal0, retVal1, retVal2, retVal3, err
 }
@@ -2045,9 +2163,10 @@ csparse := int32(0)
 if sparse { csparse = int32(1) }
   lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmbeddingBagBackward")
 
   return retVal, err
 }
@@ -2062,9 +2181,10 @@ func _EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offset2bag *Tenso
 if scaleGradByFreq { cscaleGradByFreq = int32(1) }
   lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagDenseBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmbeddingBagDenseBackward")
 
   return retVal, err
 }
@@ -2079,9 +2199,10 @@ func _EmbeddingBagDenseBackwardOut(out *Tensor, grad *Tensor, indices *Tensor, o
 if scaleGradByFreq { cscaleGradByFreq = int32(1) }
   lib.Atg_EmbeddingBagDenseBackwardOut(ptr, out.ctensor, grad.ctensor, indices.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagDenseBackwardOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmbeddingBagDenseBackwardOut")
 
   return retVal, err
 }
@@ -2102,12 +2223,13 @@ cincludeLastOffset := int32(0)
 if includeLastOffset { cincludeLastOffset = int32(1) }
   lib.Atg_EmbeddingBagForwardOnly(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagForwardOnly() failed: %w", err)
     return retVal0, retVal1, retVal2, retVal3, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
-  retVal2 = &Tensor{ctensor: *ctensorPtr2}
-  retVal3 = &Tensor{ctensor: *ctensorPtr3}
+  retVal0 = newTensor(*ctensorPtr0, "_EmbeddingBagForwardOnly_0")
+  retVal1 = newTensor(*ctensorPtr1, "_EmbeddingBagForwardOnly_1")
+  retVal2 = newTensor(*ctensorPtr2, "_EmbeddingBagForwardOnly_2")
+  retVal3 = newTensor(*ctensorPtr3, "_EmbeddingBagForwardOnly_3")
 
   return retVal0, retVal1, retVal2, retVal3, err
 }
@@ -2128,12 +2250,13 @@ cincludeLastOffset := int32(0)
 if includeLastOffset { cincludeLastOffset = int32(1) }
   lib.Atg_EmbeddingBagForwardOnlyOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagForwardOnlyOut() failed: %w", err)
     return retVal0, retVal1, retVal2, retVal3, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
-  retVal2 = &Tensor{ctensor: *ctensorPtr2}
-  retVal3 = &Tensor{ctensor: *ctensorPtr3}
+  retVal0 = newTensor(*ctensorPtr0, "_EmbeddingBagForwardOnlyOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_EmbeddingBagForwardOnlyOut_1")
+  retVal2 = newTensor(*ctensorPtr2, "_EmbeddingBagForwardOnlyOut_2")
+  retVal3 = newTensor(*ctensorPtr3, "_EmbeddingBagForwardOnlyOut_3")
 
   return retVal0, retVal1, retVal2, retVal3, err
 }
@@ -2154,12 +2277,13 @@ cincludeLastOffset := int32(0)
 if includeLastOffset { cincludeLastOffset = int32(1) }
   lib.Atg_EmbeddingBagOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagOut() failed: %w", err)
     return retVal0, retVal1, retVal2, retVal3, err
   }
-  retVal0 = &Tensor{ctensor: *ctensorPtr0}
-  retVal1 = &Tensor{ctensor: *ctensorPtr1}
-  retVal2 = &Tensor{ctensor: *ctensorPtr2}
-  retVal3 = &Tensor{ctensor: *ctensorPtr3}
+  retVal0 = newTensor(*ctensorPtr0, "_EmbeddingBagOut_0")
+  retVal1 = newTensor(*ctensorPtr1, "_EmbeddingBagOut_1")
+  retVal2 = newTensor(*ctensorPtr2, "_EmbeddingBagOut_2")
+  retVal3 = newTensor(*ctensorPtr3, "_EmbeddingBagOut_3")
 
   return retVal0, retVal1, retVal2, retVal3, err
 }
@@ -2172,9 +2296,10 @@ func _EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices
 
   lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagPerSampleWeightsBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmbeddingBagPerSampleWeightsBackward")
 
   return retVal, err
 }
@@ -2187,9 +2312,10 @@ func _EmbeddingBagPerSampleWeightsBackwardOut(out *Tensor, grad *Tensor, weight
 
   lib.Atg_EmbeddingBagPerSampleWeightsBackwardOut(ptr, out.ctensor, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagPerSampleWeightsBackwardOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmbeddingBagPerSampleWeightsBackwardOut")
 
   return retVal, err
 }
@@ -2204,9 +2330,10 @@ func _EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor,
 if scaleGradByFreq { cscaleGradByFreq = int32(1) }
   lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor, paddingIdx)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmbeddingBagSparseBackward() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmbeddingBagSparseBackward")
 
   return retVal, err
 }
@@ -2220,9 +2347,10 @@ func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice
 sizeLen := len(size)
   lib.Atg_EmptyAffineQuantized(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmptyAffineQuantized() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmptyAffineQuantized")
 
   return retVal, err
 }
@@ -2236,9 +2364,10 @@ func _EmptyAffineQuantizedOut(out *Tensor, size []int64, scale float64, zeroPoin
 sizeLen := len(size)
   lib.Atg_EmptyAffineQuantizedOut(ptr, out.ctensor, size, sizeLen, scale, zeroPoint)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmptyAffineQuantizedOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmptyAffineQuantizedOut")
 
   return retVal, err
 }
@@ -2252,9 +2381,10 @@ func _EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *T
 sizeLen := len(size)
   lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, sizeLen, scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt())
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmptyPerChannelAffineQuantized() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmptyPerChannelAffineQuantized")
 
   return retVal, err
 }
@@ -2268,9 +2398,10 @@ func _EmptyPerChannelAffineQuantizedOut(out *Tensor, size []int64, scales *Tenso
 sizeLen := len(size)
   lib.Atg_EmptyPerChannelAffineQuantizedOut(ptr, out.ctensor, size, sizeLen, scales.ctensor, zeroPoints.ctensor, axis)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EmptyPerChannelAffineQuantizedOut() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EmptyPerChannelAffineQuantizedOut")
 
   return retVal, err
 }
@@ -2283,9 +2414,10 @@ func _EuclideanDist(x1 *Tensor, x2 *Tensor)(retVal *Tensor, err error) {
 
   lib.Atg_EuclideanDist(ptr, x1.ctensor, x2.ctensor)
   if err = TorchErr(); err != nil {
+    err = fmt.Errorf("_EuclideanDist() failed: %w", err)
     return retVal, err
   }
-  retVal = &Tensor{ctensor: *ptr}
+  retVal = newTensor(*ptr, "_EuclideanDist")
 
   return retVal, err
 }
@@ -2298,9 +2430,10 @@ func _EuclideanDistOut(out *Tensor, x1 *Tensor, x2 *Tensor)(retVal *Tensor, err
 
   lib.Atg_EuclideanDistOut(ptr, out.ctensor, x1.ctensor, x2.ctensor)
   if err = TorchErr(); err != nil {
+ err = fmt.Errorf("_EuclideanDistOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_EuclideanDistOut") return retVal, err } @@ -2314,9 +2447,10 @@ func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint lib.Atg_FakeQuantizeLearnablePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizeLearnablePerChannelAffine() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FakeQuantizeLearnablePerChannelAffine") return retVal, err } @@ -2331,11 +2465,12 @@ func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffineBackward(grad *Tensor, sc lib.Atg_FakeQuantizeLearnablePerChannelAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizeLearnablePerChannelAffineBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_FakeQuantizeLearnablePerChannelAffineBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_FakeQuantizeLearnablePerChannelAffineBackward_1") + retVal2 = newTensor(*ctensorPtr2, "_FakeQuantizeLearnablePerChannelAffineBackward_2") return retVal0, retVal1, retVal2, err } @@ -2349,9 +2484,10 @@ func(ts *Tensor) _FakeQuantizeLearnablePerChannelAffineOut(out *Tensor, scale *T lib.Atg_FakeQuantizeLearnablePerChannelAffineOut(ptr, out.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax, gradFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizeLearnablePerChannelAffineOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FakeQuantizeLearnablePerChannelAffineOut") return retVal, err } @@ -2365,9 +2501,10 @@ func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint lib.Atg_FakeQuantizeLearnablePerTensorAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizeLearnablePerTensorAffine() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FakeQuantizeLearnablePerTensorAffine") return retVal, err } @@ -2382,11 +2519,12 @@ func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffineBackward(grad *Tensor, sca lib.Atg_FakeQuantizeLearnablePerTensorAffineBackward(ctensorPtr0, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizeLearnablePerTensorAffineBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_FakeQuantizeLearnablePerTensorAffineBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_FakeQuantizeLearnablePerTensorAffineBackward_1") + retVal2 = newTensor(*ctensorPtr2, "_FakeQuantizeLearnablePerTensorAffineBackward_2") return retVal0, retVal1, retVal2, err } @@ -2400,9 +2538,10 @@ func(ts *Tensor) _FakeQuantizeLearnablePerTensorAffineOut(out *Tensor, scale *Te 
lib.Atg_FakeQuantizeLearnablePerTensorAffineOut(ptr, out.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax, gradFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizeLearnablePerTensorAffineOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FakeQuantizeLearnablePerTensorAffineOut") return retVal, err } @@ -2416,10 +2555,11 @@ func(ts *Tensor) _FakeQuantizePerTensorAffineCachemaskTensorQparams(scale *Tenso lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparams(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, fakeQuantEnabled.ctensor, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizePerTensorAffineCachemaskTensorQparams() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_FakeQuantizePerTensorAffineCachemaskTensorQparams_0") + retVal1 = newTensor(*ctensorPtr1, "_FakeQuantizePerTensorAffineCachemaskTensorQparams_1") return retVal0, retVal1, err } @@ -2433,10 +2573,11 @@ func(ts *Tensor) _FakeQuantizePerTensorAffineCachemaskTensorQparamsOut(out0 *Ten lib.Atg_FakeQuantizePerTensorAffineCachemaskTensorQparamsOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, fakeQuantEnabled.ctensor, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FakeQuantizePerTensorAffineCachemaskTensorQparamsOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_FakeQuantizePerTensorAffineCachemaskTensorQparamsOut_0") + retVal1 = newTensor(*ctensorPtr1, "_FakeQuantizePerTensorAffineCachemaskTensorQparamsOut_1") return retVal0, retVal1, err } @@ -2453,9 +2594,10 @@ cforward := int32(0) if forward { cforward = int32(1) } lib.Atg_FftC2c(ptr, ts.ctensor, dim, dimLen, normalization, cforward) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FftC2c() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FftC2c") return retVal, err } @@ -2472,9 +2614,10 @@ cforward := int32(0) if forward { cforward = int32(1) } lib.Atg_FftC2cOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, normalization, cforward) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FftC2cOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FftC2cOut") return retVal, err } @@ -2489,9 +2632,10 @@ func(ts *Tensor) _FftC2r(dim []int64, normalization int64, lastDimSize int64, de dimLen := len(dim) lib.Atg_FftC2r(ptr, ts.ctensor, dim, dimLen, normalization, lastDimSize) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FftC2r() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FftC2r") return retVal, err } @@ -2506,9 +2650,10 @@ func(ts *Tensor) _FftC2rOut(out *Tensor, dim []int64, normalization int64, lastD dimLen := len(dim) lib.Atg_FftC2rOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, normalization, lastDimSize) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FftC2rOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FftC2rOut") return retVal, err } @@ -2525,9 +2670,10 @@ conesided := int32(0) if onesided { conesided = int32(1) } lib.Atg_FftR2c(ptr, ts.ctensor, dim, dimLen, 
normalization, conesided) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FftR2c() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FftR2c") return retVal, err } @@ -2544,9 +2690,10 @@ conesided := int32(0) if onesided { conesided = int32(1) } lib.Atg_FftR2cOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, normalization, conesided) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FftR2cOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FftR2cOut") return retVal, err } @@ -2562,11 +2709,12 @@ func _FlashAttentionBackward(gradOut *Tensor, query *Tensor, key *Tensor, value if isCausal { cisCausal = int32(1) } lib.Atg_FlashAttentionBackward(ctensorPtr0, gradOut.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cumSeqQ.ctensor, cumSeqK.ctensor, maxQ, maxK, dropoutP, cisCausal, philoxSeed, philoxOffset) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FlashAttentionBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_FlashAttentionBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_FlashAttentionBackward_1") + retVal2 = newTensor(*ctensorPtr2, "_FlashAttentionBackward_2") return retVal0, retVal1, retVal2, err } @@ -2586,9 +2734,10 @@ carg3 := int32(0) if arg3 { carg3 = int32(1) } lib.Atg_Foobar(ptr, ts.ctensor, carg1, carg2, carg3) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Foobar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Foobar") return retVal, err } @@ -2608,9 +2757,10 @@ carg3 := int32(0) if arg3 { carg3 = int32(1) } lib.Atg_FoobarOut(ptr, out.ctensor, ts.ctensor, carg1, carg2, carg3) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FoobarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FoobarOut") return retVal, err } @@ -2624,10 +2774,11 @@ func(ts *Tensor) _FusedDropout(p float64, del bool)(retVal0 *Tensor, retVal1 *Te lib.Atg_FusedDropout(ctensorPtr0, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FusedDropout() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_FusedDropout_0") + retVal1 = newTensor(*ctensorPtr1, "_FusedDropout_1") return retVal0, retVal1, err } @@ -2641,10 +2792,11 @@ func(ts *Tensor) _FusedDropoutOut(out0 *Tensor, out1 *Tensor, p float64, del boo lib.Atg_FusedDropoutOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FusedDropoutOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_FusedDropoutOut_0") + retVal1 = newTensor(*ctensorPtr1, "_FusedDropoutOut_1") return retVal0, retVal1, err } @@ -2662,10 +2814,11 @@ csymmetricQuant := int32(0) if symmetricQuant { csymmetricQuant = int32(1) } lib.Atg_FusedMovingAvgObsFqHelper(ctensorPtr0, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) if err = TorchErr(); err != nil { + 
err = fmt.Errorf("_FusedMovingAvgObsFqHelper() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_FusedMovingAvgObsFqHelper_0") + retVal1 = newTensor(*ctensorPtr1, "_FusedMovingAvgObsFqHelper_1") return retVal0, retVal1, err } @@ -2687,14 +2840,15 @@ csymmetricQuant := int32(0) if symmetricQuant { csymmetricQuant = int32(1) } lib.Atg_FusedMovingAvgObsFqHelperFunctional(ctensorPtr0, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FusedMovingAvgObsFqHelperFunctional() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} - retVal4 = &Tensor{ctensor: *ctensorPtr4} - retVal5 = &Tensor{ctensor: *ctensorPtr5} + retVal0 = newTensor(*ctensorPtr0, "_FusedMovingAvgObsFqHelperFunctional_0") + retVal1 = newTensor(*ctensorPtr1, "_FusedMovingAvgObsFqHelperFunctional_1") + retVal2 = newTensor(*ctensorPtr2, "_FusedMovingAvgObsFqHelperFunctional_2") + retVal3 = newTensor(*ctensorPtr3, "_FusedMovingAvgObsFqHelperFunctional_3") + retVal4 = newTensor(*ctensorPtr4, "_FusedMovingAvgObsFqHelperFunctional_4") + retVal5 = newTensor(*ctensorPtr5, "_FusedMovingAvgObsFqHelperFunctional_5") return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err } @@ -2712,10 +2866,11 @@ csymmetricQuant := int32(0) if symmetricQuant { csymmetricQuant = int32(1) } lib.Atg_FusedMovingAvgObsFqHelperOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FusedMovingAvgObsFqHelperOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_FusedMovingAvgObsFqHelperOut_0") + retVal1 = newTensor(*ctensorPtr1, "_FusedMovingAvgObsFqHelperOut_1") return retVal0, retVal1, err } @@ -2728,6 +2883,7 @@ func _FusedSdpChoice(query *Tensor, key *Tensor, value *Tensor, attnMask *Tensor if isCausal { cisCausal = int32(1) } retVal = lib.Atg_FusedSdpChoice(query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cisCausal) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FusedSdpChoice() failed: %w", err) return retVal, err } return retVal, err @@ -2742,9 +2898,10 @@ func(ts *Tensor) _FwPrimal(level int64, del bool)(retVal *Tensor, err error) { lib.Atg_FwPrimal(ptr, ts.ctensor, level) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FwPrimal() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FwPrimal") return retVal, err } @@ -2758,9 +2915,10 @@ func(ts *Tensor) _FwPrimalCopy(level int64, del bool)(retVal *Tensor, err error) lib.Atg_FwPrimalCopy(ptr, ts.ctensor, level) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FwPrimalCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FwPrimalCopy") return retVal, err } @@ 
-2774,9 +2932,10 @@ func(ts *Tensor) _FwPrimalCopyOut(out *Tensor, level int64, del bool)(retVal *Te lib.Atg_FwPrimalCopyOut(ptr, out.ctensor, ts.ctensor, level) if err = TorchErr(); err != nil { + err = fmt.Errorf("_FwPrimalCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_FwPrimalCopyOut") return retVal, err } @@ -2790,9 +2949,10 @@ func(ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, d lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_GatherSparseBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_GatherSparseBackward") return retVal, err } @@ -2807,9 +2967,10 @@ func _GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode in if alignCorners { calignCorners = int32(1) } lib.Atg_GridSampler2dCpuFallback(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("_GridSampler2dCpuFallback() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_GridSampler2dCpuFallback") return retVal, err } @@ -2824,10 +2985,11 @@ func _GridSampler2dCpuFallbackBackward(gradOutput *Tensor, input *Tensor, grid * if alignCorners { calignCorners = int32(1) } lib.Atg_GridSampler2dCpuFallbackBackward(ctensorPtr0, gradOutput.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("_GridSampler2dCpuFallbackBackward() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_GridSampler2dCpuFallbackBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_GridSampler2dCpuFallbackBackward_1") return retVal0, retVal1, err } @@ -2842,9 +3004,10 @@ func _GridSampler2dCpuFallbackOut(out *Tensor, input *Tensor, grid *Tensor, inte if alignCorners { calignCorners = int32(1) } lib.Atg_GridSampler2dCpuFallbackOut(ptr, out.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("_GridSampler2dCpuFallbackOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_GridSampler2dCpuFallbackOut") return retVal, err } @@ -2856,6 +3019,7 @@ func(ts *Tensor) _HasCompatibleShallowCopyType(from *Tensor, del bool)(retVal bo retVal = lib.Atg_HasCompatibleShallowCopyType(ts.ctensor, from.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_HasCompatibleShallowCopyType() failed: %w", err) return retVal, err } return retVal, err @@ -2868,6 +3032,7 @@ func(ts *Tensor) _HasSameStorageNumel(other *Tensor, del bool)(retVal bool, err retVal = lib.Atg_HasSameStorageNumel(ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_HasSameStorageNumel() failed: %w", err) return retVal, err } return retVal, err @@ -2886,9 +3051,10 @@ cdensity := int32(0) if density { cdensity = int32(1) } lib.Atg_HistogramddFromBinCts(ptr, out.ctensor, ts.ctensor, bins, binsLen, rangeVals, rangeValsLen, weight.ctensor, cdensity) if err = TorchErr(); err != nil { + err = fmt.Errorf("_HistogramddFromBinCts() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_HistogramddFromBinCts") return retVal, err } 
@@ -2906,9 +3072,10 @@ cdensity := int32(0) if density { cdensity = int32(1) } lib.Atg_HistogramddFromBinTensors(ptr, ts.ctensor, cbins, len(cbins), weight.ctensor, cdensity) if err = TorchErr(); err != nil { + err = fmt.Errorf("_HistogramddFromBinTensors() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_HistogramddFromBinTensors") return retVal, err } @@ -2926,9 +3093,10 @@ cdensity := int32(0) if density { cdensity = int32(1) } lib.Atg_HistogramddFromBinTensorsOut(ptr, out.ctensor, ts.ctensor, cbins, len(cbins), weight.ctensor, cdensity) if err = TorchErr(); err != nil { + err = fmt.Errorf("_HistogramddFromBinTensorsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_HistogramddFromBinTensorsOut") return retVal, err } @@ -2948,9 +3116,10 @@ cunsafety := int32(0) if unsafety { cunsafety = int32(1) } lib.Atg_IndexPutImpl(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate, cunsafety) if err = TorchErr(); err != nil { + err = fmt.Errorf("_IndexPutImpl() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_IndexPutImpl") return retVal, err } @@ -2970,9 +3139,10 @@ cunsafety := int32(0) if unsafety { cunsafety = int32(1) } lib.Atg_IndexPutImplOut(ptr, out.ctensor, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate, cunsafety) if err = TorchErr(); err != nil { + err = fmt.Errorf("_IndexPutImplOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_IndexPutImplOut") return retVal, err } @@ -2986,9 +3156,10 @@ func(ts *Tensor) _Indices(del bool)(retVal *Tensor, err error) { lib.Atg_Indices(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Indices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Indices") return retVal, err } @@ -3002,9 +3173,10 @@ func(ts *Tensor) _IndicesCopy(del bool)(retVal *Tensor, err error) { lib.Atg_IndicesCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_IndicesCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_IndicesCopy") return retVal, err } @@ -3018,9 +3190,10 @@ func(ts *Tensor) _IndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err erro lib.Atg_IndicesCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_IndicesCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_IndicesCopyOut") return retVal, err } @@ -3034,9 +3207,10 @@ func(ts *Tensor) _IsAllTrue(del bool)(retVal *Tensor, err error) { lib.Atg_IsAllTrue(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_IsAllTrue() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_IsAllTrue") return retVal, err } @@ -3050,9 +3224,10 @@ func(ts *Tensor) _IsAnyTrue(del bool)(retVal *Tensor, err error) { lib.Atg_IsAnyTrue(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_IsAnyTrue() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_IsAnyTrue") return retVal, err } @@ -3064,6 +3239,7 @@ func(ts *Tensor) _IsZerotensor(del bool)(retVal bool, err error) { retVal = lib.Atg_IsZerotensor(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_IsZerotensor() failed: %w", err) return 
retVal, err } return retVal, err @@ -3078,11 +3254,12 @@ func _LinalgDet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor, er lib.Atg_LinalgDet(ctensorPtr0, a.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgDet() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_LinalgDet_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgDet_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgDet_2") return retVal0, retVal1, retVal2, err } @@ -3096,11 +3273,12 @@ func _LinalgDetResult(result *Tensor, lU *Tensor, pivots *Tensor, a *Tensor)(ret lib.Atg_LinalgDetResult(ctensorPtr0, result.ctensor, lU.ctensor, pivots.ctensor, a.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgDetResult() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_LinalgDetResult_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgDetResult_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgDetResult_2") return retVal0, retVal1, retVal2, err } @@ -3115,10 +3293,11 @@ func _LinalgEigh(a *Tensor, uPLO string, computeV bool)(retVal0 *Tensor, retVal1 if computeV { ccomputeV = int32(1) } lib.Atg_LinalgEigh(ctensorPtr0, a.ctensor, uPLO, ccomputeV) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgEigh() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_LinalgEigh_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgEigh_1") return retVal0, retVal1, err } @@ -3133,10 +3312,11 @@ func _LinalgEighEigenvalues(eigenvalues *Tensor, eigenvectors *Tensor, a *Tensor if computeV { ccomputeV = int32(1) } lib.Atg_LinalgEighEigenvalues(ctensorPtr0, eigenvalues.ctensor, eigenvectors.ctensor, a.ctensor, uPLO, ccomputeV) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgEighEigenvalues() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_LinalgEighEigenvalues_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgEighEigenvalues_1") return retVal0, retVal1, err } @@ -3151,12 +3331,13 @@ func _LinalgSlogdet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, retVal2 *Tensor lib.Atg_LinalgSlogdet(ctensorPtr0, a.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgSlogdet() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "_LinalgSlogdet_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgSlogdet_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgSlogdet_2") + retVal3 = newTensor(*ctensorPtr3, "_LinalgSlogdet_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -3171,12 +3352,13 @@ func _LinalgSlogdetSign(sign *Tensor, logabsdet *Tensor, lU *Tensor, pivots *Ten lib.Atg_LinalgSlogdetSign(ctensorPtr0, sign.ctensor, logabsdet.ctensor, lU.ctensor, pivots.ctensor, a.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgSlogdetSign() failed: %w", err) return retVal0, retVal1, retVal2, 
retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "_LinalgSlogdetSign_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgSlogdetSign_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgSlogdetSign_2") + retVal3 = newTensor(*ctensorPtr3, "_LinalgSlogdetSign_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -3195,12 +3377,13 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.Atg_LinalgSolveEx(ctensorPtr0, a.ctensor, b.ctensor, cleft, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgSolveEx() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "_LinalgSolveEx_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgSolveEx_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgSolveEx_2") + retVal3 = newTensor(*ctensorPtr3, "_LinalgSolveEx_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -3219,12 +3402,13 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.Atg_LinalgSolveExResult(ctensorPtr0, result.ctensor, lU.ctensor, pivots.ctensor, info.ctensor, a.ctensor, b.ctensor, cleft, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgSolveExResult() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "_LinalgSolveExResult_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgSolveExResult_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgSolveExResult_2") + retVal3 = newTensor(*ctensorPtr3, "_LinalgSolveExResult_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -3242,11 +3426,12 @@ ccomputeUv := int32(0) if computeUv { ccomputeUv = int32(1) } lib.Atg_LinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices, ccomputeUv, driver) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgSvd() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_LinalgSvd_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgSvd_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgSvd_2") return retVal0, retVal1, retVal2, err } @@ -3264,11 +3449,12 @@ ccomputeUv := int32(0) if computeUv { ccomputeUv = int32(1) } lib.Atg_LinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices, ccomputeUv, driver) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LinalgSvdU() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_LinalgSvdU_0") + retVal1 = newTensor(*ctensorPtr1, "_LinalgSvdU_1") + retVal2 = newTensor(*ctensorPtr2, "_LinalgSvdU_2") return retVal0, retVal1, retVal2, err } @@ -3284,9 +3470,10 @@ func(ts *Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tens if halfToFloat { chalfToFloat = int32(1) } lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("_LogSoftmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_LogSoftmax") return retVal, err } @@ -3299,9 +3486,10 @@ func _LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inpu lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LogSoftmaxBackwardData() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_LogSoftmaxBackwardData") return retVal, err } @@ -3314,9 +3502,10 @@ func _LogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, output *Tensor, lib.Atg_LogSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LogSoftmaxBackwardDataOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_LogSoftmaxBackwardDataOut") return retVal, err } @@ -3332,9 +3521,10 @@ func(ts *Tensor) _LogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bo if halfToFloat { chalfToFloat = int32(1) } lib.Atg_LogSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LogSoftmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_LogSoftmaxOut") return retVal, err } @@ -3348,9 +3538,10 @@ func(ts *Tensor) _Logcumsumexp(dim int64, del bool)(retVal *Tensor, err error) { lib.Atg_Logcumsumexp(ptr, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Logcumsumexp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Logcumsumexp") return retVal, err } @@ -3364,9 +3555,10 @@ func(ts *Tensor) _LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tens lib.Atg_LogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LogcumsumexpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_LogcumsumexpOut") return retVal, err } @@ -3395,14 +3587,15 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.Atg_LstmMps(ctensorPtr0, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LstmMps() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} - retVal4 = &Tensor{ctensor: *ctensorPtr4} - retVal5 = &Tensor{ctensor: *ctensorPtr5} + retVal0 = newTensor(*ctensorPtr0, "_LstmMps_0") + retVal1 = newTensor(*ctensorPtr1, "_LstmMps_1") + retVal2 = newTensor(*ctensorPtr2, "_LstmMps_2") + retVal3 = newTensor(*ctensorPtr3, "_LstmMps_3") + retVal4 = newTensor(*ctensorPtr4, "_LstmMps_4") + retVal5 = newTensor(*ctensorPtr5, "_LstmMps_5") return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err } @@ -3431,14 +3624,15 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.Atg_LstmMpsOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, out5.ctensor, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, 
cbidirectional, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LstmMpsOut() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} - retVal4 = &Tensor{ctensor: *ctensorPtr4} - retVal5 = &Tensor{ctensor: *ctensorPtr5} + retVal0 = newTensor(*ctensorPtr0, "_LstmMpsOut_0") + retVal1 = newTensor(*ctensorPtr1, "_LstmMpsOut_1") + retVal2 = newTensor(*ctensorPtr2, "_LstmMpsOut_2") + retVal3 = newTensor(*ctensorPtr3, "_LstmMpsOut_3") + retVal4 = newTensor(*ctensorPtr4, "_LstmMpsOut_4") + retVal5 = newTensor(*ctensorPtr5, "_LstmMpsOut_5") return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, err } @@ -3457,11 +3651,12 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.Atg_LuWithInfo(ctensorPtr0, ts.ctensor, cpivot, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("_LuWithInfo() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_LuWithInfo_0") + retVal1 = newTensor(*ctensorPtr1, "_LuWithInfo_1") + retVal2 = newTensor(*ctensorPtr2, "_LuWithInfo_2") return retVal0, retVal1, retVal2, err } @@ -3474,9 +3669,10 @@ func _MakeDual(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor, err lib.Atg_MakeDual(ptr, primal.ctensor, tangent.ctensor, level) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MakeDual() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MakeDual") return retVal, err } @@ -3489,9 +3685,10 @@ func _MakeDualCopy(primal *Tensor, tangent *Tensor, level int64)(retVal *Tensor, lib.Atg_MakeDualCopy(ptr, primal.ctensor, tangent.ctensor, level) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MakeDualCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MakeDualCopy") return retVal, err } @@ -3504,9 +3701,10 @@ func _MakeDualCopyOut(out *Tensor, primal *Tensor, tangent *Tensor, level int64) lib.Atg_MakeDualCopyOut(ptr, out.ctensor, primal.ctensor, tangent.ctensor, level) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MakeDualCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MakeDualCopyOut") return retVal, err } @@ -3520,9 +3718,10 @@ func(ts *Tensor) _MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MakePerChannelQuantizedTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MakePerChannelQuantizedTensor") return retVal, err } @@ -3536,9 +3735,10 @@ func(ts *Tensor) _MakePerChannelQuantizedTensorOut(out *Tensor, scale *Tensor, z lib.Atg_MakePerChannelQuantizedTensorOut(ptr, out.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MakePerChannelQuantizedTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MakePerChannelQuantizedTensorOut") return retVal, err } @@ -3552,9 +3752,10 @@ func(ts *Tensor) 
_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, d lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MakePerTensorQuantizedTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MakePerTensorQuantizedTensor") return retVal, err } @@ -3568,9 +3769,10 @@ func(ts *Tensor) _MakePerTensorQuantizedTensorOut(out *Tensor, scale float64, ze lib.Atg_MakePerTensorQuantizedTensorOut(ptr, out.ctensor, ts.ctensor, scale, zeroPoint) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MakePerTensorQuantizedTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MakePerTensorQuantizedTensorOut") return retVal, err } @@ -3584,9 +3786,10 @@ func(ts *Tensor) _MaskedScale(mask *Tensor, scale float64, del bool)(retVal *Ten lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MaskedScale() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MaskedScale") return retVal, err } @@ -3600,9 +3803,10 @@ func(ts *Tensor) _MaskedScaleOut(out *Tensor, mask *Tensor, scale float64, del b lib.Atg_MaskedScaleOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, scale) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MaskedScaleOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MaskedScaleOut") return retVal, err } @@ -3628,9 +3832,10 @@ var cmaskTypeVal int64 = 0 } lib.Atg_MaskedSoftmax(ptr, ts.ctensor, mask.ctensor, cdimVal, cdimNull, cmaskTypeVal, cmaskTypeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MaskedSoftmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MaskedSoftmax") return retVal, err } @@ -3649,9 +3854,10 @@ func _MaskedSoftmaxBackward(gradOutput *Tensor, output *Tensor, mask *Tensor, di } lib.Atg_MaskedSoftmaxBackward(ptr, gradOutput.ctensor, output.ctensor, mask.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MaskedSoftmaxBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MaskedSoftmaxBackward") return retVal, err } @@ -3670,9 +3876,10 @@ func _MaskedSoftmaxBackwardOut(out *Tensor, gradOutput *Tensor, output *Tensor, } lib.Atg_MaskedSoftmaxBackwardOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, mask.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MaskedSoftmaxBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MaskedSoftmaxBackwardOut") return retVal, err } @@ -3698,9 +3905,10 @@ var cmaskTypeVal int64 = 0 } lib.Atg_MaskedSoftmaxOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, cdimVal, cdimNull, cmaskTypeVal, cmaskTypeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MaskedSoftmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MaskedSoftmaxOut") return retVal, err } @@ -3715,9 +3923,10 @@ func(ts *Tensor) _MkldnnReshape(shape []int64, del bool)(retVal *Tensor, err err shapeLen := len(shape) lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, shapeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MkldnnReshape() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = 
newTensor(*ptr, "_MkldnnReshape") return retVal, err } @@ -3732,9 +3941,10 @@ func(ts *Tensor) _MkldnnReshapeOut(out *Tensor, shape []int64, del bool)(retVal shapeLen := len(shape) lib.Atg_MkldnnReshapeOut(ptr, out.ctensor, ts.ctensor, shape, shapeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MkldnnReshapeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MkldnnReshapeOut") return retVal, err } @@ -3748,9 +3958,10 @@ func(ts *Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tens lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MkldnnTranspose() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MkldnnTranspose") return retVal, err } @@ -3763,6 +3974,7 @@ func(ts *Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64)(err error) { lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MkldnnTranspose_() failed: %w", err) return err } ts.ctensor = *ptr @@ -3779,9 +3991,10 @@ func(ts *Tensor) _MkldnnTransposeOut(out *Tensor, dim0 int64, dim1 int64, del bo lib.Atg_MkldnnTransposeOut(ptr, out.ctensor, ts.ctensor, dim0, dim1) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MkldnnTransposeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MkldnnTransposeOut") return retVal, err } @@ -3798,9 +4011,10 @@ strideLen := len(stride) dilationLen := len(dilation) lib.Atg_MpsConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MpsConvolution() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MpsConvolution") return retVal, err } @@ -3817,9 +4031,10 @@ strideLen := len(stride) dilationLen := len(dilation) lib.Atg_MpsConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MpsConvolutionOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MpsConvolutionOut") return retVal, err } @@ -3837,9 +4052,10 @@ strideLen := len(stride) dilationLen := len(dilation) lib.Atg_MpsConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MpsConvolutionTranspose() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MpsConvolutionTranspose") return retVal, err } @@ -3857,9 +4073,10 @@ strideLen := len(stride) dilationLen := len(dilation) lib.Atg_MpsConvolutionTransposeOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("_MpsConvolutionTransposeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_MpsConvolutionTransposeOut") return retVal, err } @@ -3875,11 +4092,12 @@ func _NativeBatchNormLegit(input *Tensor, weight *Tensor, bias *Tensor, runningM if training { ctraining = int32(1) } lib.Atg_NativeBatchNormLegit(ctensorPtr0, 
input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeBatchNormLegit() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_NativeBatchNormLegit_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeBatchNormLegit_1") + retVal2 = newTensor(*ctensorPtr2, "_NativeBatchNormLegit_2") return retVal0, retVal1, retVal2, err } @@ -3897,13 +4115,14 @@ func _NativeBatchNormLegitFunctional(input *Tensor, weight *Tensor, bias *Tensor if training { ctraining = int32(1) } lib.Atg_NativeBatchNormLegitFunctional(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeBatchNormLegitFunctional() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, retVal4, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} - retVal4 = &Tensor{ctensor: *ctensorPtr4} + retVal0 = newTensor(*ctensorPtr0, "_NativeBatchNormLegitFunctional_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeBatchNormLegitFunctional_1") + retVal2 = newTensor(*ctensorPtr2, "_NativeBatchNormLegitFunctional_2") + retVal3 = newTensor(*ctensorPtr3, "_NativeBatchNormLegitFunctional_3") + retVal4 = newTensor(*ctensorPtr4, "_NativeBatchNormLegitFunctional_4") return retVal0, retVal1, retVal2, retVal3, retVal4, err } @@ -3919,11 +4138,12 @@ func _NativeBatchNormLegitNoStats(input *Tensor, weight *Tensor, bias *Tensor, t if training { ctraining = int32(1) } lib.Atg_NativeBatchNormLegitNoStats(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, ctraining, momentum, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeBatchNormLegitNoStats() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_NativeBatchNormLegitNoStats_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeBatchNormLegitNoStats_1") + retVal2 = newTensor(*ctensorPtr2, "_NativeBatchNormLegitNoStats_2") return retVal0, retVal1, retVal2, err } @@ -3939,11 +4159,12 @@ func _NativeBatchNormLegitNoStatsOut(out *Tensor, saveMean *Tensor, saveInvstd * if training { ctraining = int32(1) } lib.Atg_NativeBatchNormLegitNoStatsOut(ctensorPtr0, out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, ctraining, momentum, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeBatchNormLegitNoStatsOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_NativeBatchNormLegitNoStatsOut_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeBatchNormLegitNoStatsOut_1") + retVal2 = newTensor(*ctensorPtr2, "_NativeBatchNormLegitNoStatsOut_2") return retVal0, retVal1, retVal2, err } @@ -3959,11 +4180,12 @@ func _NativeBatchNormLegitOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, if training { ctraining = int32(1) } lib.Atg_NativeBatchNormLegitOut(ctensorPtr0, out.ctensor, 
saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeBatchNormLegitOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_NativeBatchNormLegitOut_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeBatchNormLegitOut_1") + retVal2 = newTensor(*ctensorPtr2, "_NativeBatchNormLegitOut_2") return retVal0, retVal1, retVal2, err } @@ -3982,12 +4204,13 @@ caverageAttnWeights := int32(0) if averageAttnWeights { caverageAttnWeights = int32(1) } lib.Atg_NativeDecoderOnlyMultiHeadAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor, cneedWeights, caverageAttnWeights) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeDecoderOnlyMultiHeadAttention() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "_NativeDecoderOnlyMultiHeadAttention_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeDecoderOnlyMultiHeadAttention_1") + retVal2 = newTensor(*ctensorPtr2, "_NativeDecoderOnlyMultiHeadAttention_2") + retVal3 = newTensor(*ctensorPtr3, "_NativeDecoderOnlyMultiHeadAttention_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -4006,12 +4229,13 @@ caverageAttnWeights := int32(0) if averageAttnWeights { caverageAttnWeights = int32(1) } lib.Atg_NativeDecoderOnlyMultiHeadAttentionOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor, cneedWeights, caverageAttnWeights) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeDecoderOnlyMultiHeadAttentionOut() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "_NativeDecoderOnlyMultiHeadAttentionOut_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeDecoderOnlyMultiHeadAttentionOut_1") + retVal2 = newTensor(*ctensorPtr2, "_NativeDecoderOnlyMultiHeadAttentionOut_2") + retVal3 = newTensor(*ctensorPtr3, "_NativeDecoderOnlyMultiHeadAttentionOut_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -4034,10 +4258,11 @@ var cmaskTypeVal int64 = 0 } lib.Atg_NativeMultiHeadAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, cneedWeights, caverageAttnWeights, cmaskTypeVal, cmaskTypeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeMultiHeadAttention() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_NativeMultiHeadAttention_0") + retVal1 = newTensor(*ctensorPtr1, 
"_NativeMultiHeadAttention_1") return retVal0, retVal1, err } @@ -4060,10 +4285,11 @@ var cmaskTypeVal int64 = 0 } lib.Atg_NativeMultiHeadAttentionOut(ctensorPtr0, out0.ctensor, out1.ctensor, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor, cneedWeights, caverageAttnWeights, cmaskTypeVal, cmaskTypeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NativeMultiHeadAttentionOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_NativeMultiHeadAttentionOut_0") + retVal1 = newTensor(*ctensorPtr1, "_NativeMultiHeadAttentionOut_1") return retVal0, retVal1, err } @@ -4077,9 +4303,10 @@ func(ts *Tensor) _NegView(del bool)(retVal *Tensor, err error) { lib.Atg_NegView(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NegView() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NegView") return retVal, err } @@ -4093,9 +4320,10 @@ func(ts *Tensor) _NegViewCopy(del bool)(retVal *Tensor, err error) { lib.Atg_NegViewCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NegViewCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NegViewCopy") return retVal, err } @@ -4109,9 +4337,10 @@ func(ts *Tensor) _NegViewCopyOut(out *Tensor, del bool)(retVal *Tensor, err erro lib.Atg_NegViewCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NegViewCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NegViewCopyOut") return retVal, err } @@ -4126,9 +4355,10 @@ func _NestedFromPadded(padded *Tensor, cpuNestedShapeExample *Tensor, fuseTransf if fuseTransform0213 { cfuseTransform0213 = int32(1) } lib.Atg_NestedFromPadded(ptr, padded.ctensor, cpuNestedShapeExample.ctensor, cfuseTransform0213) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedFromPadded() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedFromPadded") return retVal, err } @@ -4141,9 +4371,10 @@ func _NestedFromPaddedAndNestedExample(padded *Tensor, ntExample *Tensor)(retVal lib.Atg_NestedFromPaddedAndNestedExample(ptr, padded.ctensor, ntExample.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedFromPaddedAndNestedExample() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedFromPaddedAndNestedExample") return retVal, err } @@ -4156,9 +4387,10 @@ func _NestedFromPaddedAndNestedExampleOut(out *Tensor, padded *Tensor, ntExample lib.Atg_NestedFromPaddedAndNestedExampleOut(ptr, out.ctensor, padded.ctensor, ntExample.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedFromPaddedAndNestedExampleOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedFromPaddedAndNestedExampleOut") return retVal, err } @@ -4173,9 +4405,10 @@ func _NestedFromPaddedOut(out *Tensor, padded *Tensor, cpuNestedShapeExample *Te if fuseTransform0213 { cfuseTransform0213 = int32(1) } lib.Atg_NestedFromPaddedOut(ptr, out.ctensor, padded.ctensor, cpuNestedShapeExample.ctensor, cfuseTransform0213) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedFromPaddedOut() failed: %w", err) return 
retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedFromPaddedOut") return retVal, err } @@ -4189,9 +4422,10 @@ func(ts *Tensor) _NestedSelectBackward(gradOutput *Tensor, dim int64, index int6 lib.Atg_NestedSelectBackward(ptr, gradOutput.ctensor, ts.ctensor, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedSelectBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedSelectBackward") return retVal, err } @@ -4208,9 +4442,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.Atg_NestedSumBackward(ptr, grad.ctensor, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedSumBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedSumBackward") return retVal, err } @@ -4225,9 +4460,10 @@ func(ts *Tensor) _NestedViewFromBuffer(nestedSize *Tensor, nestedStrides *Tensor offsetsLen := len(offsets) lib.Atg_NestedViewFromBuffer(ptr, ts.ctensor, nestedSize.ctensor, nestedStrides.ctensor, offsets, offsetsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedViewFromBuffer() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedViewFromBuffer") return retVal, err } @@ -4242,9 +4478,10 @@ func(ts *Tensor) _NestedViewFromBufferCopy(nestedSize *Tensor, nestedStrides *Te offsetsLen := len(offsets) lib.Atg_NestedViewFromBufferCopy(ptr, ts.ctensor, nestedSize.ctensor, nestedStrides.ctensor, offsets, offsetsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedViewFromBufferCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedViewFromBufferCopy") return retVal, err } @@ -4259,9 +4496,10 @@ func(ts *Tensor) _NestedViewFromBufferCopyOut(out *Tensor, nestedSize *Tensor, n offsetsLen := len(offsets) lib.Atg_NestedViewFromBufferCopyOut(ptr, out.ctensor, ts.ctensor, nestedSize.ctensor, nestedStrides.ctensor, offsets, offsetsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NestedViewFromBufferCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NestedViewFromBufferCopyOut") return retVal, err } @@ -4275,9 +4513,10 @@ func(ts *Tensor) _NewZerosWithSameFeatureMeta(other *Tensor, selfNumBatchDims in lib.Atg_NewZerosWithSameFeatureMeta(ptr, ts.ctensor, other.ctensor, selfNumBatchDims) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NewZerosWithSameFeatureMeta() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NewZerosWithSameFeatureMeta") return retVal, err } @@ -4291,9 +4530,10 @@ func(ts *Tensor) _NewZerosWithSameFeatureMetaOut(out *Tensor, other *Tensor, sel lib.Atg_NewZerosWithSameFeatureMetaOut(ptr, out.ctensor, ts.ctensor, other.ctensor, selfNumBatchDims) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NewZerosWithSameFeatureMetaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NewZerosWithSameFeatureMetaOut") return retVal, err } @@ -4304,6 +4544,7 @@ func _NnpackAvailable()(retVal bool, err error) { retVal = lib.Atg_NnpackAvailable() if err = TorchErr(); err != nil { + err = fmt.Errorf("_NnpackAvailable() failed: %w", err) return retVal, err } return retVal, err @@ -4319,9 +4560,10 @@ func _NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias 
*Tensor, padd strideLen := len(stride) lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NnpackSpatialConvolution() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NnpackSpatialConvolution") return retVal, err } @@ -4336,9 +4578,10 @@ func _NnpackSpatialConvolutionOut(out *Tensor, input *Tensor, weight *Tensor, bi strideLen := len(stride) lib.Atg_NnpackSpatialConvolutionOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_NnpackSpatialConvolutionOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_NnpackSpatialConvolutionOut") return retVal, err } @@ -4350,6 +4593,7 @@ func(ts *Tensor) _Nnz(del bool)(retVal int64, err error) { retVal = lib.Atg_Nnz(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Nnz() failed: %w", err) return retVal, err } return retVal, err @@ -4365,10 +4609,11 @@ func _PackPaddedSequence(input *Tensor, lengths *Tensor, batchFirst bool)(retVal if batchFirst { cbatchFirst = int32(1) } lib.Atg_PackPaddedSequence(ctensorPtr0, input.ctensor, lengths.ctensor, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PackPaddedSequence() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_PackPaddedSequence_0") + retVal1 = newTensor(*ctensorPtr1, "_PackPaddedSequence_1") return retVal0, retVal1, err } @@ -4384,9 +4629,10 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, inputSizeLen, batchSizes.ctensor, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PackPaddedSequenceBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PackPaddedSequenceBackward") return retVal, err } @@ -4401,10 +4647,11 @@ func _PackPaddedSequenceOut(out0 *Tensor, out1 *Tensor, input *Tensor, lengths * if batchFirst { cbatchFirst = int32(1) } lib.Atg_PackPaddedSequenceOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, lengths.ctensor, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PackPaddedSequenceOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_PackPaddedSequenceOut_0") + retVal1 = newTensor(*ctensorPtr1, "_PackPaddedSequenceOut_1") return retVal0, retVal1, err } @@ -4419,9 +4666,10 @@ func(ts *Tensor) _PadCircular(pad []int64, del bool)(retVal *Tensor, err error) padLen := len(pad) lib.Atg_PadCircular(ptr, ts.ctensor, pad, padLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PadCircular() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PadCircular") return retVal, err } @@ -4442,9 +4690,10 @@ var cvalueVal float64 = 0.0 } lib.Atg_PadEnum(ptr, ts.ctensor, pad, padLen, mode, cvalueVal, cvalueNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PadEnum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PadEnum") return retVal, err } @@ -4459,10 +4708,11 @@ func 
_PadPackedSequence(data *Tensor, batchSizes *Tensor, batchFirst bool, paddi if batchFirst { cbatchFirst = int32(1) } lib.Atg_PadPackedSequence(ctensorPtr0, data.ctensor, batchSizes.ctensor, cbatchFirst, paddingValue.cscalar, totalLength) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PadPackedSequence() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_PadPackedSequence_0") + retVal1 = newTensor(*ctensorPtr1, "_PadPackedSequence_1") return retVal0, retVal1, err } @@ -4476,9 +4726,10 @@ func(ts *Tensor) _PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool lib.Atg_PdistBackward(ptr, grad.ctensor, ts.ctensor, p, pdist.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PdistBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PdistBackward") return retVal, err } @@ -4492,9 +4743,10 @@ func(ts *Tensor) _PdistBackwardOut(out *Tensor, grad *Tensor, p float64, pdist * lib.Atg_PdistBackwardOut(ptr, out.ctensor, grad.ctensor, ts.ctensor, p, pdist.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PdistBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PdistBackwardOut") return retVal, err } @@ -4508,9 +4760,10 @@ func(ts *Tensor) _PinMemory(device gotch.Device, del bool)(retVal *Tensor, err e lib.Atg_PinMemory(ptr, ts.ctensor, device.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PinMemory() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PinMemory") return retVal, err } @@ -4524,9 +4777,10 @@ func(ts *Tensor) _PinMemoryOut(out *Tensor, device gotch.Device, del bool)(retVa lib.Atg_PinMemoryOut(ptr, out.ctensor, ts.ctensor, device.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PinMemoryOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PinMemoryOut") return retVal, err } @@ -4540,9 +4794,10 @@ func(ts *Tensor) _PreluKernel(weight *Tensor, del bool)(retVal *Tensor, err erro lib.Atg_PreluKernel(ptr, ts.ctensor, weight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PreluKernel() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_PreluKernel") return retVal, err } @@ -4556,10 +4811,11 @@ func(ts *Tensor) _PreluKernelBackward(gradOutput *Tensor, weight *Tensor, del bo lib.Atg_PreluKernelBackward(ctensorPtr0, gradOutput.ctensor, ts.ctensor, weight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_PreluKernelBackward() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_PreluKernelBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_PreluKernelBackward_1") return retVal0, retVal1, err } @@ -4573,9 +4829,10 @@ func(ts *Tensor) _RemoveBatchDim(level int64, batchSize int64, outDim int64, del lib.Atg_RemoveBatchDim(ptr, ts.ctensor, level, batchSize, outDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_RemoveBatchDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_RemoveBatchDim") return retVal, err } @@ -4591,9 +4848,10 @@ func(ts *Tensor) _ReshapeAlias(size []int64, stride []int64, del bool)(retVal *T strideLen := 
len(stride) lib.Atg_ReshapeAlias(ptr, ts.ctensor, size, sizeLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ReshapeAlias() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ReshapeAlias") return retVal, err } @@ -4609,9 +4867,10 @@ func(ts *Tensor) _ReshapeAliasCopy(size []int64, stride []int64, del bool)(retVa strideLen := len(stride) lib.Atg_ReshapeAliasCopy(ptr, ts.ctensor, size, sizeLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ReshapeAliasCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ReshapeAliasCopy") return retVal, err } @@ -4627,9 +4886,10 @@ func(ts *Tensor) _ReshapeAliasCopyOut(out *Tensor, size []int64, stride []int64, strideLen := len(stride) lib.Atg_ReshapeAliasCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ReshapeAliasCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ReshapeAliasCopyOut") return retVal, err } @@ -4644,9 +4904,10 @@ func(ts *Tensor) _ReshapeCopy(size []int64, del bool)(retVal *Tensor, err error) sizeLen := len(size) lib.Atg_ReshapeCopy(ptr, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ReshapeCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ReshapeCopy") return retVal, err } @@ -4660,9 +4921,10 @@ func(ts *Tensor) _ReshapeFromTensor(shape *Tensor, del bool)(retVal *Tensor, err lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ReshapeFromTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ReshapeFromTensor") return retVal, err } @@ -4677,9 +4939,10 @@ func(ts *Tensor) _ResizeOutput(size []int64, device gotch.Device, del bool)(retV sizeLen := len(size) lib.Atg_ResizeOutput(ptr, ts.ctensor, size, sizeLen, device.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ResizeOutput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ResizeOutput") return retVal, err } @@ -4693,6 +4956,7 @@ func(ts *Tensor) _ResizeOutput_(size []int64, device gotch.Device)(err error) { sizeLen := len(size) lib.Atg_ResizeOutput_(ptr, ts.ctensor, size, sizeLen, device.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ResizeOutput_() failed: %w", err) return err } ts.ctensor = *ptr @@ -4710,9 +4974,10 @@ func(ts *Tensor) _ResizeOutputOut(out *Tensor, size []int64, device gotch.Device sizeLen := len(size) lib.Atg_ResizeOutputOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, device.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ResizeOutputOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ResizeOutputOut") return retVal, err } @@ -4725,10 +4990,11 @@ func _RowwisePrune(weight *Tensor, mask *Tensor, compressedIndicesDtype gotch.DT lib.Atg_RowwisePrune(ctensorPtr0, weight.ctensor, mask.ctensor, compressedIndicesDtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_RowwisePrune() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_RowwisePrune_0") + retVal1 = newTensor(*ctensorPtr1, 
"_RowwisePrune_1") return retVal0, retVal1, err } @@ -4742,9 +5008,10 @@ func(ts *Tensor) _SampleDirichlet(del bool)(retVal *Tensor, err error) { lib.Atg_SampleDirichlet(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SampleDirichlet() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SampleDirichlet") return retVal, err } @@ -4758,9 +5025,10 @@ func(ts *Tensor) _SampleDirichletOut(out *Tensor, del bool)(retVal *Tensor, err lib.Atg_SampleDirichletOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SampleDirichletOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SampleDirichletOut") return retVal, err } @@ -4773,9 +5041,10 @@ func _SaturateWeightToFp16(weight *Tensor)(retVal *Tensor, err error) { lib.Atg_SaturateWeightToFp16(ptr, weight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SaturateWeightToFp16() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SaturateWeightToFp16") return retVal, err } @@ -4792,10 +5061,11 @@ cisCausal := int32(0) if isCausal { cisCausal = int32(1) } lib.Atg_ScaledDotProductAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cneedAttnWeights, cisCausal) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ScaledDotProductAttention() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_ScaledDotProductAttention_0") + retVal1 = newTensor(*ctensorPtr1, "_ScaledDotProductAttention_1") return retVal0, retVal1, err } @@ -4810,10 +5080,11 @@ func _ScaledDotProductAttentionMath(query *Tensor, key *Tensor, value *Tensor, a if isCausal { cisCausal = int32(1) } lib.Atg_ScaledDotProductAttentionMath(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cisCausal, dropoutMask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ScaledDotProductAttentionMath() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_ScaledDotProductAttentionMath_0") + retVal1 = newTensor(*ctensorPtr1, "_ScaledDotProductAttentionMath_1") return retVal0, retVal1, err } @@ -4830,10 +5101,11 @@ cisCausal := int32(0) if isCausal { cisCausal = int32(1) } lib.Atg_ScaledDotProductEfficientAttention(ctensorPtr0, query.ctensor, key.ctensor, value.ctensor, ccomputeLogSumexp, cisCausal) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ScaledDotProductEfficientAttention() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_ScaledDotProductEfficientAttention_0") + retVal1 = newTensor(*ctensorPtr1, "_ScaledDotProductEfficientAttention_1") return retVal0, retVal1, err } @@ -4851,11 +5123,12 @@ cchunkGradOutputs := int32(0) if chunkGradOutputs { cchunkGradOutputs = int32(1) } lib.Atg_ScaledDotProductEfficientAttentionBackward(ctensorPtr0, gradOut_.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cisCausal, cchunkGradOutputs) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ScaledDotProductEfficientAttentionBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = 
&Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_ScaledDotProductEfficientAttentionBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_ScaledDotProductEfficientAttentionBackward_1") + retVal2 = newTensor(*ctensorPtr2, "_ScaledDotProductEfficientAttentionBackward_2") return retVal0, retVal1, retVal2, err } @@ -4871,11 +5144,12 @@ func _ScaledDotProductFlashAttentionBackward(gradOut *Tensor, query *Tensor, key if isCausal { cisCausal = int32(1) } lib.Atg_ScaledDotProductFlashAttentionBackward(ctensorPtr0, gradOut.ctensor, query.ctensor, key.ctensor, value.ctensor, out.ctensor, logsumexp.ctensor, cumSeqQ.ctensor, cumSeqK.ctensor, maxQ, maxK, dropoutP, cisCausal, philoxSeed, philoxOffset) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ScaledDotProductFlashAttentionBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_ScaledDotProductFlashAttentionBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_ScaledDotProductFlashAttentionBackward_1") + retVal2 = newTensor(*ctensorPtr2, "_ScaledDotProductFlashAttentionBackward_2") return retVal0, retVal1, retVal2, err } @@ -4888,9 +5162,10 @@ func _SegmentReduceBackward(grad *Tensor, output *Tensor, data *Tensor, reduce s lib.Atg_SegmentReduceBackward(ptr, grad.ctensor, output.ctensor, data.ctensor, reduce, lengths.ctensor, offsets.ctensor, axis, initial.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SegmentReduceBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SegmentReduceBackward") return retVal, err } @@ -4903,9 +5178,10 @@ func _SegmentReduceBackwardOut(out *Tensor, grad *Tensor, output *Tensor, data * lib.Atg_SegmentReduceBackwardOut(ptr, out.ctensor, grad.ctensor, output.ctensor, data.ctensor, reduce, lengths.ctensor, offsets.ctensor, axis, initial.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SegmentReduceBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SegmentReduceBackwardOut") return retVal, err } @@ -4919,9 +5195,10 @@ func(ts *Tensor) _ShapeAsTensor(del bool)(retVal *Tensor, err error) { lib.Atg_ShapeAsTensor(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ShapeAsTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ShapeAsTensor") return retVal, err } @@ -4939,11 +5216,12 @@ strideLen := len(stride) paddingLen := len(padding) lib.Atg_SlowConv2dBackward(ctensorPtr0, gradInput.ctensor, gradWeight.ctensor, gradBias.ctensor, gradOutput.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SlowConv2dBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_SlowConv2dBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_SlowConv2dBackward_1") + retVal2 = newTensor(*ctensorPtr2, "_SlowConv2dBackward_2") return retVal0, retVal1, retVal2, err } @@ -4956,10 +5234,11 @@ func _SobolEngineDraw(quasi *Tensor, n int64, sobolstate *Tensor, dimension 
int6 lib.Atg_SobolEngineDraw(ctensorPtr0, quasi.ctensor, n, sobolstate.ctensor, dimension, numGenerated, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SobolEngineDraw() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_SobolEngineDraw_0") + retVal1 = newTensor(*ctensorPtr1, "_SobolEngineDraw_1") return retVal0, retVal1, err } @@ -4972,6 +5251,7 @@ func(ts *Tensor) _SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, n lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SobolEngineFf_() failed: %w", err) return err } ts.ctensor = *ptr @@ -4987,6 +5267,7 @@ func(ts *Tensor) _SobolEngineInitializeState_(dimension int64)(err error) { lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SobolEngineInitializeState_() failed: %w", err) return err } ts.ctensor = *ptr @@ -5002,6 +5283,7 @@ func(ts *Tensor) _SobolEngineScramble_(ltm *Tensor, dimension int64)(err error) lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SobolEngineScramble_() failed: %w", err) return err } ts.ctensor = *ptr @@ -5020,9 +5302,10 @@ func(ts *Tensor) _Softmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor, if halfToFloat { chalfToFloat = int32(1) } lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Softmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Softmax") return retVal, err } @@ -5035,9 +5318,10 @@ func _SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, inputDt lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SoftmaxBackwardData() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SoftmaxBackwardData") return retVal, err } @@ -5050,9 +5334,10 @@ func _SoftmaxBackwardDataOut(gradInput *Tensor, gradOutput *Tensor, output *Tens lib.Atg_SoftmaxBackwardDataOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor, dim, inputDtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SoftmaxBackwardDataOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SoftmaxBackwardDataOut") return retVal, err } @@ -5068,9 +5353,10 @@ func(ts *Tensor) _SoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del bool) if halfToFloat { chalfToFloat = int32(1) } lib.Atg_SoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SoftmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SoftmaxOut") return retVal, err } @@ -5084,9 +5370,10 @@ func(ts *Tensor) _SparseAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tens lib.Atg_SparseAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseAddmm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseAddmm") return retVal, err } @@ -5100,9 +5387,10 @@ func(ts *Tensor) _SparseAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bo 
lib.Atg_SparseAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseAddmmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseAddmmOut") return retVal, err } @@ -5117,9 +5405,10 @@ func(ts *Tensor) _SparseBroadcastTo(size []int64, del bool)(retVal *Tensor, err sizeLen := len(size) lib.Atg_SparseBroadcastTo(ptr, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseBroadcastTo() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseBroadcastTo") return retVal, err } @@ -5134,9 +5423,10 @@ func(ts *Tensor) _SparseBroadcastToCopy(size []int64, del bool)(retVal *Tensor, sizeLen := len(size) lib.Atg_SparseBroadcastToCopy(ptr, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseBroadcastToCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseBroadcastToCopy") return retVal, err } @@ -5151,9 +5441,10 @@ func(ts *Tensor) _SparseBroadcastToCopyOut(out *Tensor, size []int64, del bool)( sizeLen := len(size) lib.Atg_SparseBroadcastToCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseBroadcastToCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseBroadcastToCopyOut") return retVal, err } @@ -5167,9 +5458,10 @@ func _SparseBscTensorUnsafe(ccolIndices *Tensor, rowIndices *Tensor, values *Ten sizeLen := len(size) lib.Atg_SparseBscTensorUnsafe(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseBscTensorUnsafe() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseBscTensorUnsafe") return retVal, err } @@ -5183,9 +5475,10 @@ func _SparseBsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Ten sizeLen := len(size) lib.Atg_SparseBsrTensorUnsafe(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseBsrTensorUnsafe() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseBsrTensorUnsafe") return retVal, err } @@ -5199,9 +5492,10 @@ func _SparseCompressedTensorUnsafe(compressedIndices *Tensor, plainIndices *Tens sizeLen := len(size) lib.Atg_SparseCompressedTensorUnsafe(ptr, compressedIndices.ctensor, plainIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCompressedTensorUnsafe() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCompressedTensorUnsafe") return retVal, err } @@ -5215,9 +5509,10 @@ func _SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optio sizeLen := len(size) lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCooTensorUnsafe() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCooTensorUnsafe") return retVal, err } @@ 
-5231,9 +5526,10 @@ func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, opt sizeLen := len(size) lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCooTensorWithDims() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCooTensorWithDims") return retVal, err } @@ -5247,9 +5543,10 @@ func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size [] sizeLen := len(size) lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, sizeLen, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCooTensorWithDimsAndTensors() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCooTensorWithDimsAndTensors") return retVal, err } @@ -5263,9 +5560,10 @@ func _SparseCooTensorWithDimsAndTensorsOut(out *Tensor, sparseDim int64, denseDi sizeLen := len(size) lib.Atg_SparseCooTensorWithDimsAndTensorsOut(ptr, out.ctensor, sparseDim, denseDim, size, sizeLen, indices.ctensor, values.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCooTensorWithDimsAndTensorsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCooTensorWithDimsAndTensorsOut") return retVal, err } @@ -5279,9 +5577,10 @@ func _SparseCooTensorWithDimsOut(out *Tensor, sparseDim int64, denseDim int64, s sizeLen := len(size) lib.Atg_SparseCooTensorWithDimsOut(ptr, out.ctensor, sparseDim, denseDim, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCooTensorWithDimsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCooTensorWithDimsOut") return retVal, err } @@ -5295,9 +5594,10 @@ func _SparseCscTensorUnsafe(ccolIndices *Tensor, rowIndices *Tensor, values *Ten sizeLen := len(size) lib.Atg_SparseCscTensorUnsafe(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCscTensorUnsafe() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCscTensorUnsafe") return retVal, err } @@ -5314,9 +5614,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.Atg_SparseCsrProd(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCsrProd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCsrProd") return retVal, err } @@ -5333,9 +5634,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.Atg_SparseCsrProdDimDtypeOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCsrProdDimDtypeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCsrProdDimDtypeOut") return retVal, err } @@ -5352,9 +5654,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.Atg_SparseCsrSum(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCsrSum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = 
newTensor(*ptr, "_SparseCsrSum") return retVal, err } @@ -5371,9 +5674,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.Atg_SparseCsrSumDimDtypeOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCsrSumDimDtypeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCsrSumDimDtypeOut") return retVal, err } @@ -5387,9 +5691,10 @@ func _SparseCsrTensorUnsafe(crowIndices *Tensor, colIndices *Tensor, values *Ten sizeLen := len(size) lib.Atg_SparseCsrTensorUnsafe(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseCsrTensorUnsafe() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseCsrTensorUnsafe") return retVal, err } @@ -5405,9 +5710,10 @@ func(ts *Tensor) _SparseLogSoftmax(dim int64, halfToFloat bool, del bool)(retVal if halfToFloat { chalfToFloat = int32(1) } lib.Atg_SparseLogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseLogSoftmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseLogSoftmax") return retVal, err } @@ -5421,9 +5727,10 @@ func(ts *Tensor) _SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tenso lib.Atg_SparseLogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseLogSoftmaxBackwardData() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseLogSoftmaxBackwardData") return retVal, err } @@ -5437,9 +5744,10 @@ func(ts *Tensor) _SparseLogSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tenso lib.Atg_SparseLogSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseLogSoftmaxBackwardDataOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseLogSoftmaxBackwardDataOut") return retVal, err } @@ -5453,9 +5761,10 @@ func(ts *Tensor) _SparseLogSoftmaxInt(dim int64, dtype gotch.DType, del bool)(re lib.Atg_SparseLogSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseLogSoftmaxInt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseLogSoftmaxInt") return retVal, err } @@ -5471,9 +5780,10 @@ func(ts *Tensor) _SparseLogSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, if halfToFloat { chalfToFloat = int32(1) } lib.Atg_SparseLogSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseLogSoftmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseLogSoftmaxOut") return retVal, err } @@ -5486,9 +5796,10 @@ func _SparseMm(sparse *Tensor, dense *Tensor)(retVal *Tensor, err error) { lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseMm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseMm") return retVal, err } @@ -5501,9 +5812,10 @@ func _SparseMmReduce(sparse *Tensor, dense *Tensor, reduce 
string)(retVal *Tenso lib.Atg_SparseMmReduce(ptr, sparse.ctensor, dense.ctensor, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseMmReduce() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseMmReduce") return retVal, err } @@ -5517,10 +5829,11 @@ func(ts *Tensor) _SparseMmReduceImpl(other *Tensor, reduce string, del bool)(ret lib.Atg_SparseMmReduceImpl(ctensorPtr0, ts.ctensor, other.ctensor, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseMmReduceImpl() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_SparseMmReduceImpl_0") + retVal1 = newTensor(*ctensorPtr1, "_SparseMmReduceImpl_1") return retVal0, retVal1, err } @@ -5536,9 +5849,10 @@ func(ts *Tensor) _SparseSoftmax(dim int64, halfToFloat bool, del bool)(retVal *T if halfToFloat { chalfToFloat = int32(1) } lib.Atg_SparseSoftmax(ptr, ts.ctensor, dim, chalfToFloat) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSoftmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSoftmax") return retVal, err } @@ -5552,9 +5866,10 @@ func(ts *Tensor) _SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, lib.Atg_SparseSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSoftmaxBackwardData() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSoftmaxBackwardData") return retVal, err } @@ -5568,9 +5883,10 @@ func(ts *Tensor) _SparseSoftmaxBackwardDataOut(out *Tensor, gradOutput *Tensor, lib.Atg_SparseSoftmaxBackwardDataOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSoftmaxBackwardDataOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSoftmaxBackwardDataOut") return retVal, err } @@ -5584,9 +5900,10 @@ func(ts *Tensor) _SparseSoftmaxInt(dim int64, dtype gotch.DType, del bool)(retVa lib.Atg_SparseSoftmaxInt(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSoftmaxInt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSoftmaxInt") return retVal, err } @@ -5602,9 +5919,10 @@ func(ts *Tensor) _SparseSoftmaxOut(out *Tensor, dim int64, halfToFloat bool, del if halfToFloat { chalfToFloat = int32(1) } lib.Atg_SparseSoftmaxOut(ptr, out.ctensor, ts.ctensor, dim, chalfToFloat) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSoftmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSoftmaxOut") return retVal, err } @@ -5618,9 +5936,10 @@ func(ts *Tensor) _SparseSparseMatmul(other *Tensor, del bool)(retVal *Tensor, er lib.Atg_SparseSparseMatmul(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSparseMatmul() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSparseMatmul") return retVal, err } @@ -5634,9 +5953,10 @@ func(ts *Tensor) _SparseSparseMatmulOut(out *Tensor, other *Tensor, del bool)(re lib.Atg_SparseSparseMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { 
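// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the generated diff] Every hunk in this file
// swaps the bare literal `&Tensor{ctensor: *ptr}` for a call to
// `newTensor(*ptr, "<BindingName>")`. The constructor itself is hand-written
// elsewhere in package ts and is not shown in this patch; a minimal version
// consistent with these call sites, assuming `runtime` is imported and that
// libtch's AtFree releases the underlying C tensor (as the hand-written Drop
// does), could look like:
//
//	func newTensor(ctensor lib.Ctensor, nameOpt ...string) *Tensor {
//		name := "unnamed"
//		if len(nameOpt) > 0 {
//			name = nameOpt[0]
//		}
//		x := &Tensor{ctensor: ctensor}
//		// Release the C allocation once the Go wrapper becomes
//		// unreachable, so generated bindings stop leaking by default.
//		runtime.SetFinalizer(x, func(t *Tensor) { lib.AtFree(t.ctensor) })
//		_ = name // the binding name can be recorded for leak diagnostics
//		return x
//	}
// ---------------------------------------------------------------------------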
+ err = fmt.Errorf("_SparseSparseMatmulOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSparseMatmulOut") return retVal, err } @@ -5650,9 +5970,10 @@ func(ts *Tensor) _SparseSum(del bool)(retVal *Tensor, err error) { lib.Atg_SparseSum(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSum") return retVal, err } @@ -5667,9 +5988,10 @@ func(ts *Tensor) _SparseSumBackward(grad *Tensor, dim []int64, del bool)(retVal dimLen := len(dim) lib.Atg_SparseSumBackward(ptr, grad.ctensor, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSumBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSumBackward") return retVal, err } @@ -5684,9 +6006,10 @@ func(ts *Tensor) _SparseSumBackwardOut(out *Tensor, grad *Tensor, dim []int64, d dimLen := len(dim) lib.Atg_SparseSumBackwardOut(ptr, out.ctensor, grad.ctensor, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSumBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSumBackwardOut") return retVal, err } @@ -5701,9 +6024,10 @@ func(ts *Tensor) _SparseSumDim(dim []int64, del bool)(retVal *Tensor, err error) dimLen := len(dim) lib.Atg_SparseSumDim(ptr, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSumDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSumDim") return retVal, err } @@ -5718,9 +6042,10 @@ func(ts *Tensor) _SparseSumDimDtype(dim []int64, dtype gotch.DType, del bool)(re dimLen := len(dim) lib.Atg_SparseSumDimDtype(ptr, ts.ctensor, dim, dimLen, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSumDimDtype() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSumDimDtype") return retVal, err } @@ -5735,9 +6060,10 @@ func(ts *Tensor) _SparseSumDimOut(out *Tensor, dim []int64, del bool)(retVal *Te dimLen := len(dim) lib.Atg_SparseSumDimOut(ptr, out.ctensor, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSumDimOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSumDimOut") return retVal, err } @@ -5751,9 +6077,10 @@ func(ts *Tensor) _SparseSumDtype(dtype gotch.DType, del bool)(retVal *Tensor, er lib.Atg_SparseSumDtype(ptr, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SparseSumDtype() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SparseSumDtype") return retVal, err } @@ -5767,9 +6094,10 @@ func _Spdiags(diagonals *Tensor, offsets *Tensor, shape []int64, layout Layout)( shapeLen := len(shape) lib.Atg_Spdiags(ptr, diagonals.ctensor, offsets.ctensor, shape, shapeLen, int8(layout)) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Spdiags() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Spdiags") return retVal, err } @@ -5783,9 +6111,10 @@ func _SpdiagsOut(out *Tensor, diagonals *Tensor, offsets *Tensor, shape []int64, shapeLen := len(shape) lib.Atg_SpdiagsOut(ptr, out.ctensor, diagonals.ctensor, offsets.ctensor, shape, 
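// [Editorial note, assumption] The `del bool` parameter threaded through the
// methods above is handled by generated context lines this view omits: when
// del is true the receiver is dropped after the call, roughly
//
//	if del { defer ts.MustDrop() }
//
// which lets call chains free intermediate tensors eagerly instead of
// waiting for the finalizer installed by newTensor.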
shapeLen, int8(layout)) if err = TorchErr(); err != nil { + err = fmt.Errorf("_SpdiagsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_SpdiagsOut") return retVal, err } @@ -5800,9 +6129,10 @@ func _Stack(tensors []*Tensor, dim int64)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.Atg_Stack(ptr, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Stack() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Stack") return retVal, err } @@ -5817,9 +6147,10 @@ func _StackOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err er for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.Atg_StackOut(ptr, out.ctensor, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_StackOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_StackOut") return retVal, err } @@ -5833,9 +6164,10 @@ func(ts *Tensor) _StandardGamma(del bool)(retVal *Tensor, err error) { lib.Atg_StandardGamma(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_StandardGamma() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_StandardGamma") return retVal, err } @@ -5849,9 +6181,10 @@ func(ts *Tensor) _StandardGammaGrad(output *Tensor, del bool)(retVal *Tensor, er lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_StandardGammaGrad() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_StandardGammaGrad") return retVal, err } @@ -5865,9 +6198,10 @@ func(ts *Tensor) _StandardGammaGradOut(out *Tensor, output *Tensor, del bool)(re lib.Atg_StandardGammaGradOut(ptr, out.ctensor, ts.ctensor, output.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_StandardGammaGradOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_StandardGammaGradOut") return retVal, err } @@ -5881,9 +6215,10 @@ func(ts *Tensor) _StandardGammaOut(out *Tensor, del bool)(retVal *Tensor, err er lib.Atg_StandardGammaOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_StandardGammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_StandardGammaOut") return retVal, err } @@ -5896,9 +6231,10 @@ func _TestAmbiguousDefaults(dummy *Tensor, a int64, b int64)(retVal *Tensor, err lib.Atg_TestAmbiguousDefaults(ptr, dummy.ctensor, a, b) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestAmbiguousDefaults() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAmbiguousDefaults") return retVal, err } @@ -5911,9 +6247,10 @@ func _TestAmbiguousDefaultsB(dummy *Tensor, a int64, b string)(retVal *Tensor, e lib.Atg_TestAmbiguousDefaultsB(ptr, dummy.ctensor, a, b) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestAmbiguousDefaultsB() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAmbiguousDefaultsB") return retVal, err } @@ -5927,9 +6264,10 @@ func(ts *Tensor) _TestAutogradMultipleDispatch(del bool)(retVal *Tensor, err err lib.Atg_TestAutogradMultipleDispatch(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("_TestAutogradMultipleDispatch() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAutogradMultipleDispatch") return retVal, err } @@ -5943,9 +6281,10 @@ func(ts *Tensor) _TestAutogradMultipleDispatchFullcoverageOut(out *Tensor, del b lib.Atg_TestAutogradMultipleDispatchFullcoverageOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestAutogradMultipleDispatchFullcoverageOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAutogradMultipleDispatchFullcoverageOut") return retVal, err } @@ -5961,9 +6300,10 @@ func(ts *Tensor) _TestAutogradMultipleDispatchNtonly(b bool, del bool)(retVal *T if b { cb = int32(1) } lib.Atg_TestAutogradMultipleDispatchNtonly(ptr, ts.ctensor, cb) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestAutogradMultipleDispatchNtonly() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAutogradMultipleDispatchNtonly") return retVal, err } @@ -5977,9 +6317,10 @@ func(ts *Tensor) _TestAutogradMultipleDispatchView(del bool)(retVal *Tensor, err lib.Atg_TestAutogradMultipleDispatchView(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestAutogradMultipleDispatchView() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAutogradMultipleDispatchView") return retVal, err } @@ -5993,9 +6334,10 @@ func(ts *Tensor) _TestAutogradMultipleDispatchViewCopy(del bool)(retVal *Tensor, lib.Atg_TestAutogradMultipleDispatchViewCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestAutogradMultipleDispatchViewCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAutogradMultipleDispatchViewCopy") return retVal, err } @@ -6009,9 +6351,10 @@ func(ts *Tensor) _TestAutogradMultipleDispatchViewCopyOut(out *Tensor, del bool) lib.Atg_TestAutogradMultipleDispatchViewCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestAutogradMultipleDispatchViewCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestAutogradMultipleDispatchViewCopyOut") return retVal, err } @@ -6025,9 +6368,10 @@ func(ts *Tensor) _TestCheckTensor(del bool)(retVal *Tensor, err error) { lib.Atg_TestCheckTensor(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestCheckTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestCheckTensor") return retVal, err } @@ -6041,9 +6385,10 @@ func _TestOptionalFilledIntlist(values *Tensor, addends []int64)(retVal *Tensor, addendsLen := len(addends) lib.Atg_TestOptionalFilledIntlist(ptr, values.ctensor, addends, addendsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestOptionalFilledIntlist() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestOptionalFilledIntlist") return retVal, err } @@ -6057,9 +6402,10 @@ func _TestOptionalFilledIntlistOut(out *Tensor, values *Tensor, addends []int64) addendsLen := len(addends) lib.Atg_TestOptionalFilledIntlistOut(ptr, out.ctensor, values.ctensor, addends, addendsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestOptionalFilledIntlistOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + 
retVal = newTensor(*ptr, "_TestOptionalFilledIntlistOut") return retVal, err } @@ -6073,9 +6419,10 @@ func _TestOptionalFloatlist(values *Tensor, addends []float64)(retVal *Tensor, e addendsLen := len(addends) lib.Atg_TestOptionalFloatlist(ptr, values.ctensor, addends, addendsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestOptionalFloatlist() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestOptionalFloatlist") return retVal, err } @@ -6089,9 +6436,10 @@ func _TestOptionalFloatlistOut(out *Tensor, values *Tensor, addends []float64)(r addendsLen := len(addends) lib.Atg_TestOptionalFloatlistOut(ptr, out.ctensor, values.ctensor, addends, addendsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestOptionalFloatlistOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestOptionalFloatlistOut") return retVal, err } @@ -6105,9 +6453,10 @@ func _TestOptionalIntlist(values *Tensor, addends []int64)(retVal *Tensor, err e addendsLen := len(addends) lib.Atg_TestOptionalIntlist(ptr, values.ctensor, addends, addendsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestOptionalIntlist() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestOptionalIntlist") return retVal, err } @@ -6121,9 +6470,10 @@ func _TestOptionalIntlistOut(out *Tensor, values *Tensor, addends []int64)(retVa addendsLen := len(addends) lib.Atg_TestOptionalIntlistOut(ptr, out.ctensor, values.ctensor, addends, addendsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestOptionalIntlistOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestOptionalIntlistOut") return retVal, err } @@ -6137,9 +6487,10 @@ func(ts *Tensor) _TestSerializationSubcmul(other *Tensor, del bool)(retVal *Tens lib.Atg_TestSerializationSubcmul(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestSerializationSubcmul() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestSerializationSubcmul") return retVal, err } @@ -6152,9 +6503,10 @@ func _TestStringDefault(dummy *Tensor, a string, b string)(retVal *Tensor, err e lib.Atg_TestStringDefault(ptr, dummy.ctensor, a, b) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestStringDefault() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestStringDefault") return retVal, err } @@ -6168,9 +6520,10 @@ func(ts *Tensor) _TestWarnInAutograd(del bool)(retVal *Tensor, err error) { lib.Atg_TestWarnInAutograd(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestWarnInAutograd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestWarnInAutograd") return retVal, err } @@ -6184,9 +6537,10 @@ func(ts *Tensor) _TestWarnInAutogradOut(out *Tensor, del bool)(retVal *Tensor, e lib.Atg_TestWarnInAutogradOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TestWarnInAutogradOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TestWarnInAutogradOut") return retVal, err } @@ -6202,9 +6556,10 @@ func(ts *Tensor) _ToCopy(optionsKind gotch.DType, optionsDevice gotch.Device, no if nonBlocking { cnonBlocking = int32(1) } lib.Atg_ToCopy(ptr, ts.ctensor, optionsKind.CInt(), 
optionsDevice.CInt(), cnonBlocking) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ToCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ToCopy") return retVal, err } @@ -6220,9 +6575,10 @@ func(ts *Tensor) _ToCopyOut(out *Tensor, nonBlocking bool, del bool)(retVal *Ten if nonBlocking { cnonBlocking = int32(1) } lib.Atg_ToCopyOut(ptr, out.ctensor, ts.ctensor, cnonBlocking) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ToCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ToCopyOut") return retVal, err } @@ -6236,9 +6592,10 @@ func(ts *Tensor) _ToDense(dtype gotch.DType, del bool)(retVal *Tensor, err error lib.Atg_ToDense(ptr, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ToDense() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ToDense") return retVal, err } @@ -6252,9 +6609,10 @@ func(ts *Tensor) _ToDenseOut(out *Tensor, dtype gotch.DType, del bool)(retVal *T lib.Atg_ToDenseOut(ptr, out.ctensor, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ToDenseOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ToDenseOut") return retVal, err } @@ -6268,11 +6626,12 @@ func _TransformBiasRescaleQkv(qkv *Tensor, qkvBias *Tensor, numHeads int64)(retV lib.Atg_TransformBiasRescaleQkv(ctensorPtr0, qkv.ctensor, qkvBias.ctensor, numHeads) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TransformBiasRescaleQkv() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_TransformBiasRescaleQkv_0") + retVal1 = newTensor(*ctensorPtr1, "_TransformBiasRescaleQkv_1") + retVal2 = newTensor(*ctensorPtr2, "_TransformBiasRescaleQkv_2") return retVal0, retVal1, retVal2, err } @@ -6286,11 +6645,12 @@ func _TransformBiasRescaleQkvOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, qkv * lib.Atg_TransformBiasRescaleQkvOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, qkv.ctensor, qkvBias.ctensor, numHeads) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TransformBiasRescaleQkvOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_TransformBiasRescaleQkvOut_0") + retVal1 = newTensor(*ctensorPtr1, "_TransformBiasRescaleQkvOut_1") + retVal2 = newTensor(*ctensorPtr2, "_TransformBiasRescaleQkvOut_2") return retVal0, retVal1, retVal2, err } @@ -6308,11 +6668,12 @@ cnormFirst := int32(0) if normFirst { cnormFirst = int32(1) } lib.Atg_TransformerDecoderOnlyLayerFwd(ctensorPtr0, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TransformerDecoderOnlyLayerFwd() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: 
*ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_TransformerDecoderOnlyLayerFwd_0") + retVal1 = newTensor(*ctensorPtr1, "_TransformerDecoderOnlyLayerFwd_1") + retVal2 = newTensor(*ctensorPtr2, "_TransformerDecoderOnlyLayerFwd_2") return retVal0, retVal1, retVal2, err } @@ -6330,11 +6691,12 @@ cnormFirst := int32(0) if normFirst { cnormFirst = int32(1) } lib.Atg_TransformerDecoderOnlyLayerFwdOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, incrKey.ctensor, incrValue.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TransformerDecoderOnlyLayerFwdOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_TransformerDecoderOnlyLayerFwdOut_0") + retVal1 = newTensor(*ctensorPtr1, "_TransformerDecoderOnlyLayerFwdOut_1") + retVal2 = newTensor(*ctensorPtr2, "_TransformerDecoderOnlyLayerFwdOut_2") return retVal0, retVal1, retVal2, err } @@ -6357,9 +6719,10 @@ var cmaskTypeVal int64 = 0 } lib.Atg_TransformerEncoderLayerFwd(ptr, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, cmaskTypeVal, cmaskTypeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TransformerEncoderLayerFwd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TransformerEncoderLayerFwd") return retVal, err } @@ -6382,9 +6745,10 @@ var cmaskTypeVal int64 = 0 } lib.Atg_TransformerEncoderLayerFwdOut(ptr, out.ctensor, src.ctensor, embedDim, numHeads, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, cuseGelu, cnormFirst, eps, normWeight1.ctensor, normBias1.ctensor, normWeight2.ctensor, normBias2.ctensor, ffnWeight1.ctensor, ffnBias1.ctensor, ffnWeight2.ctensor, ffnBias2.ctensor, mask.ctensor, cmaskTypeVal, cmaskTypeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TransformerEncoderLayerFwdOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TransformerEncoderLayerFwdOut") return retVal, err } @@ -6401,9 +6765,10 @@ expand3Len := len(expand3) sumdimLen := len(sumdim) lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, expand1Len, expand2, expand2Len, expand3, expand3Len, sumdim, sumdimLen, unrollDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Trilinear() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Trilinear") return retVal, err } @@ -6420,9 +6785,10 @@ expand3Len := len(expand3) sumdimLen := len(sumdim) lib.Atg_TrilinearOut(ptr, out.ctensor, i1.ctensor, i2.ctensor, i3.ctensor, expand1, expand1Len, expand2, expand2Len, expand3, expand3Len, sumdim, sumdimLen, unrollDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TrilinearOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TrilinearOut") return 
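// [Editorial sketch] The added fmt.Errorf("...: %w", err) lines keep the
// original Torch error reachable through the standard errors package while
// prefixing the name of the failing binding. A hypothetical caller, assuming
// "errors" and "fmt" are imported and x is a sparse *Tensor:
//
//	sum, err := x._SparseSum(false)
//	if err != nil {
//		// err.Error() reads `_SparseSum() failed: <original message>`;
//		// errors.Unwrap(err) recovers the original error on its own.
//		fmt.Println(errors.Unwrap(err))
//		return
//	}
//	_ = sum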
retVal, err } @@ -6435,9 +6801,10 @@ func _TritonMultiHeadAttention(query *Tensor, key *Tensor, value *Tensor, embedD lib.Atg_TritonMultiHeadAttention(ptr, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TritonMultiHeadAttention() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TritonMultiHeadAttention") return retVal, err } @@ -6450,9 +6817,10 @@ func _TritonMultiHeadAttentionOut(out *Tensor, query *Tensor, key *Tensor, value lib.Atg_TritonMultiHeadAttentionOut(ptr, out.ctensor, query.ctensor, key.ctensor, value.ctensor, embedDim, numHead, qkvWeight.ctensor, qkvBias.ctensor, projWeight.ctensor, projBias.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TritonMultiHeadAttentionOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TritonMultiHeadAttentionOut") return retVal, err } @@ -6465,9 +6833,10 @@ func _TritonScaledDotAttention(q *Tensor, k *Tensor, v *Tensor, dropoutP float64 lib.Atg_TritonScaledDotAttention(ptr, q.ctensor, k.ctensor, v.ctensor, dropoutP) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TritonScaledDotAttention() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TritonScaledDotAttention") return retVal, err } @@ -6480,9 +6849,10 @@ func _TritonScaledDotAttentionOut(out *Tensor, q *Tensor, k *Tensor, v *Tensor, lib.Atg_TritonScaledDotAttentionOut(ptr, out.ctensor, q.ctensor, k.ctensor, v.ctensor, dropoutP) if err = TorchErr(); err != nil { + err = fmt.Errorf("_TritonScaledDotAttentionOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_TritonScaledDotAttentionOut") return retVal, err } @@ -6500,10 +6870,11 @@ creturnInverse := int32(0) if returnInverse { creturnInverse = int32(1) } lib.Atg_Unique(ctensorPtr0, ts.ctensor, csorted, creturnInverse) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Unique() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_Unique_0") + retVal1 = newTensor(*ctensorPtr1, "_Unique_1") return retVal0, retVal1, err } @@ -6524,11 +6895,12 @@ creturnCounts := int32(0) if returnCounts { creturnCounts = int32(1) } lib.Atg_Unique2(ctensorPtr0, ts.ctensor, csorted, creturnInverse, creturnCounts) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Unique2() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "_Unique2_0") + retVal1 = newTensor(*ctensorPtr1, "_Unique2_1") + retVal2 = newTensor(*ctensorPtr2, "_Unique2_2") return retVal0, retVal1, retVal2, err } @@ -6549,11 +6921,12 @@ creturnCounts := int32(0) if returnCounts { creturnCounts = int32(1) } lib.Atg_Unique2Out(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, csorted, creturnInverse, creturnCounts) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Unique2Out() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = 
newTensor(*ctensorPtr0, "_Unique2Out_0") + retVal1 = newTensor(*ctensorPtr1, "_Unique2Out_1") + retVal2 = newTensor(*ctensorPtr2, "_Unique2Out_2") return retVal0, retVal1, retVal2, err } @@ -6571,10 +6944,11 @@ creturnInverse := int32(0) if returnInverse { creturnInverse = int32(1) } lib.Atg_UniqueOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, csorted, creturnInverse) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UniqueOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_UniqueOut_0") + retVal1 = newTensor(*ctensorPtr1, "_UniqueOut_1") return retVal0, retVal1, err } @@ -6587,10 +6961,11 @@ func _UnpackDual(dual *Tensor, level int64)(retVal0 *Tensor, retVal1 *Tensor, er lib.Atg_UnpackDual(ctensorPtr0, dual.ctensor, level) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UnpackDual() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_UnpackDual_0") + retVal1 = newTensor(*ctensorPtr1, "_UnpackDual_1") return retVal0, retVal1, err } @@ -6605,9 +6980,10 @@ func(ts *Tensor) _UnsafeView(size []int64, del bool)(retVal *Tensor, err error) sizeLen := len(size) lib.Atg_UnsafeView(ptr, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UnsafeView() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UnsafeView") return retVal, err } @@ -6622,9 +6998,10 @@ func(ts *Tensor) _UnsafeViewOut(out *Tensor, size []int64, del bool)(retVal *Ten sizeLen := len(size) lib.Atg_UnsafeViewOut(ptr, out.ctensor, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UnsafeViewOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UnsafeViewOut") return retVal, err } @@ -6653,9 +7030,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBicubic2dAa(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBicubic2dAa() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBicubic2dAa") return retVal, err } @@ -6684,9 +7062,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBicubic2dAaBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBicubic2dAaBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBicubic2dAaBackward") return retVal, err } @@ -6715,9 +7094,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBicubic2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBicubic2dAaBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBicubic2dAaBackwardGradInput") return retVal, err } @@ -6746,9 +7126,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBicubic2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, 
calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBicubic2dAaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBicubic2dAaOut") return retVal, err } @@ -6765,9 +7146,10 @@ calignCorners := int32(0) scaleFactorsLen := len(scaleFactors) lib.Atg_UpsampleBicubic2dAaVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBicubic2dAaVec() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBicubic2dAaVec") return retVal, err } @@ -6796,9 +7178,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBilinear2dAa(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBilinear2dAa() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBilinear2dAa") return retVal, err } @@ -6827,9 +7210,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBilinear2dAaBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBilinear2dAaBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBilinear2dAaBackward") return retVal, err } @@ -6858,9 +7242,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBilinear2dAaBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBilinear2dAaBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBilinear2dAaBackwardGradInput") return retVal, err } @@ -6889,9 +7274,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleBilinear2dAaOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBilinear2dAaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBilinear2dAaOut") return retVal, err } @@ -6908,9 +7294,10 @@ calignCorners := int32(0) scaleFactorsLen := len(scaleFactors) lib.Atg_UpsampleBilinear2dAaVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleBilinear2dAaVec() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleBilinear2dAaVec") return retVal, err } @@ -6931,9 +7318,10 @@ var cscalesVal float64 = 0.0 } lib.Atg_UpsampleNearestExact1d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact1d") return retVal, err } @@ -6954,9 +7342,10 @@ var cscalesVal float64 = 0.0 } lib.Atg_UpsampleNearestExact1dBackward(ptr, gradOutput.ctensor, outputSize, 
outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact1dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact1dBackward") return retVal, err } @@ -6977,9 +7366,10 @@ var cscalesVal float64 = 0.0 } lib.Atg_UpsampleNearestExact1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact1dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact1dBackwardGradInput") return retVal, err } @@ -7000,9 +7390,10 @@ var cscalesVal float64 = 0.0 } lib.Atg_UpsampleNearestExact1dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact1dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact1dOut") return retVal, err } @@ -7017,9 +7408,10 @@ func _UpsampleNearestExact1dVec(input *Tensor, outputSize []int64, scaleFactors scaleFactorsLen := len(scaleFactors) lib.Atg_UpsampleNearestExact1dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact1dVec() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact1dVec") return retVal, err } @@ -7046,9 +7438,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact2d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact2d") return retVal, err } @@ -7075,9 +7468,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact2dBackward") return retVal, err } @@ -7104,9 +7498,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact2dBackwardGradInput") return retVal, err } @@ -7133,9 +7528,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact2dOut") return retVal, err } @@ -7150,9 +7546,10 @@ func 
_UpsampleNearestExact2dVec(input *Tensor, outputSize []int64, scaleFactors scaleFactorsLen := len(scaleFactors) lib.Atg_UpsampleNearestExact2dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact2dVec() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact2dVec") return retVal, err } @@ -7185,9 +7582,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact3d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact3d") return retVal, err } @@ -7220,9 +7618,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact3dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact3dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact3dBackward") return retVal, err } @@ -7255,9 +7654,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact3dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact3dBackwardGradInput") return retVal, err } @@ -7290,9 +7690,10 @@ var cscalesWVal float64 = 0.0 } lib.Atg_UpsampleNearestExact3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact3dOut") return retVal, err } @@ -7307,9 +7708,10 @@ func _UpsampleNearestExact3dVec(input *Tensor, outputSize []int64, scaleFactors scaleFactorsLen := len(scaleFactors) lib.Atg_UpsampleNearestExact3dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UpsampleNearestExact3dVec() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_UpsampleNearestExact3dVec") return retVal, err } @@ -7322,6 +7724,7 @@ func _UseCudnnCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, t targetLengthsLen := len(targetLengths) retVal = lib.Atg_UseCudnnCtcLoss(logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank) if err = TorchErr(); err != nil { + err = fmt.Errorf("_UseCudnnCtcLoss() failed: %w", err) return retVal, err } return retVal, err @@ -7333,6 +7736,7 @@ func _UseCudnnCtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Ten retVal = lib.Atg_UseCudnnCtcLossTensor(logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank) if err = TorchErr(); err != nil 
{ + err = fmt.Errorf("_UseCudnnCtcLossTensor() failed: %w", err) return retVal, err } return retVal, err @@ -7344,6 +7748,7 @@ func _UseCudnnRnnFlattenWeight()(retVal bool, err error) { retVal = lib.Atg_UseCudnnRnnFlattenWeight() if err = TorchErr(); err != nil { + err = fmt.Errorf("_UseCudnnRnnFlattenWeight() failed: %w", err) return retVal, err } return retVal, err @@ -7358,9 +7763,10 @@ func(ts *Tensor) _Values(del bool)(retVal *Tensor, err error) { lib.Atg_Values(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Values() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_Values") return retVal, err } @@ -7374,9 +7780,10 @@ func(ts *Tensor) _ValuesCopy(del bool)(retVal *Tensor, err error) { lib.Atg_ValuesCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ValuesCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ValuesCopy") return retVal, err } @@ -7390,9 +7797,10 @@ func(ts *Tensor) _ValuesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error lib.Atg_ValuesCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_ValuesCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_ValuesCopyOut") return retVal, err } @@ -7404,6 +7812,7 @@ func(ts *Tensor) _Version(del bool)(retVal int64, err error) { retVal = lib.Atg_Version(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("_Version() failed: %w", err) return retVal, err } return retVal, err @@ -7417,9 +7826,10 @@ func _WeightNorm(v *Tensor, g *Tensor, dim int64)(retVal *Tensor, err error) { lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_WeightNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "_WeightNorm") return retVal, err } @@ -7432,10 +7842,11 @@ func _WeightNormDifferentiableBackward(gradW *Tensor, savedV *Tensor, savedG *Te lib.Atg_WeightNormDifferentiableBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_WeightNormDifferentiableBackward() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_WeightNormDifferentiableBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_WeightNormDifferentiableBackward_1") return retVal0, retVal1, err } @@ -7448,10 +7859,11 @@ func _WeightNormInterface(v *Tensor, g *Tensor, dim int64)(retVal0 *Tensor, retV lib.Atg_WeightNormInterface(ctensorPtr0, v.ctensor, g.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_WeightNormInterface() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_WeightNormInterface_0") + retVal1 = newTensor(*ctensorPtr1, "_WeightNormInterface_1") return retVal0, retVal1, err } @@ -7464,10 +7876,11 @@ func _WeightNormInterfaceBackward(gradW *Tensor, savedV *Tensor, savedG *Tensor, lib.Atg_WeightNormInterfaceBackward(ctensorPtr0, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_WeightNormInterfaceBackward() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: 
*ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_WeightNormInterfaceBackward_0") + retVal1 = newTensor(*ctensorPtr1, "_WeightNormInterfaceBackward_1") return retVal0, retVal1, err } @@ -7480,10 +7893,11 @@ func _WeightNormInterfaceBackwardOut(out0 *Tensor, out1 *Tensor, gradW *Tensor, lib.Atg_WeightNormInterfaceBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, gradW.ctensor, savedV.ctensor, savedG.ctensor, savedNorms.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_WeightNormInterfaceBackwardOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_WeightNormInterfaceBackwardOut_0") + retVal1 = newTensor(*ctensorPtr1, "_WeightNormInterfaceBackwardOut_1") return retVal0, retVal1, err } @@ -7496,10 +7910,11 @@ func _WeightNormInterfaceOut(out0 *Tensor, out1 *Tensor, v *Tensor, g *Tensor, d lib.Atg_WeightNormInterfaceOut(ctensorPtr0, out0.ctensor, out1.ctensor, v.ctensor, g.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("_WeightNormInterfaceOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "_WeightNormInterfaceOut_0") + retVal1 = newTensor(*ctensorPtr1, "_WeightNormInterfaceOut_1") return retVal0, retVal1, err } @@ -7513,9 +7928,10 @@ func(ts *Tensor) Abs(del bool)(retVal *Tensor, err error) { lib.AtgAbs(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Abs() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Abs") return retVal, err } @@ -7528,6 +7944,7 @@ func(ts *Tensor) Abs_()(err error) { lib.AtgAbs_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Abs_() failed: %w", err) return err } ts.ctensor = *ptr @@ -7544,9 +7961,10 @@ func(ts *Tensor) AbsOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AbsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AbsOut") return retVal, err } @@ -7560,9 +7978,10 @@ func(ts *Tensor) Absolute(del bool)(retVal *Tensor, err error) { lib.AtgAbsolute(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Absolute() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Absolute") return retVal, err } @@ -7575,6 +7994,7 @@ func(ts *Tensor) Absolute_()(err error) { lib.AtgAbsolute_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Absolute_() failed: %w", err) return err } ts.ctensor = *ptr @@ -7591,9 +8011,10 @@ func(ts *Tensor) AbsoluteOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAbsoluteOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AbsoluteOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AbsoluteOut") return retVal, err } @@ -7607,9 +8028,10 @@ func(ts *Tensor) Acos(del bool)(retVal *Tensor, err error) { lib.AtgAcos(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Acos() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Acos") return retVal, err } @@ -7622,6 +8044,7 @@ func(ts *Tensor) Acos_()(err error) { 
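// NOTE (editor, illustrative only — not part of the generated diff): the patch
// applies two mechanical changes to every wrapper in this file. Allocating
// wrappers now route the returned C pointer through newTensor() with the Go
// function name attached, while in-place wrappers (the `_` suffix, like Acos_
// here) still assign back to ts.ctensor and only gain the fmt.Errorf wrapping,
// since no new tensor is created. Distilled, the two generated shapes are:
//
//	// allocating wrapper: the result is registered for GC via newTensor
//	if err = TorchErr(); err != nil {
//		err = fmt.Errorf("Acos() failed: %w", err)
//		return retVal, err
//	}
//	retVal = newTensor(*ptr, "Acos")
//
//	// in-place wrapper: nothing to register, only the error gets wrapped
//	if err = TorchErr(); err != nil {
//		err = fmt.Errorf("Acos_() failed: %w", err)
//		return err
//	}
//	ts.ctensor = *ptr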
lib.AtgAcos_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Acos_() failed: %w", err) return err } ts.ctensor = *ptr @@ -7638,9 +8061,10 @@ func(ts *Tensor) AcosOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AcosOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AcosOut") return retVal, err } @@ -7654,9 +8078,10 @@ func(ts *Tensor) Acosh(del bool)(retVal *Tensor, err error) { lib.AtgAcosh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Acosh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Acosh") return retVal, err } @@ -7669,6 +8094,7 @@ func(ts *Tensor) Acosh_()(err error) { lib.AtgAcosh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Acosh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -7685,9 +8111,10 @@ func(ts *Tensor) AcoshOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAcoshOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AcoshOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AcoshOut") return retVal, err } @@ -7702,9 +8129,10 @@ func(ts *Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool)(retVal *Tensor, outputSizeLen := len(outputSize) lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveAvgPool1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveAvgPool1d") return retVal, err } @@ -7719,9 +8147,10 @@ func(ts *Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor, outputSizeLen := len(outputSize) lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveAvgPool2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveAvgPool2d") return retVal, err } @@ -7736,9 +8165,10 @@ func(ts *Tensor) AdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool) outputSizeLen := len(outputSize) lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveAvgPool2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveAvgPool2dOut") return retVal, err } @@ -7753,9 +8183,10 @@ func(ts *Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor, outputSizeLen := len(outputSize) lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveAvgPool3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveAvgPool3d") return retVal, err } @@ -7769,9 +8200,10 @@ func(ts *Tensor) AdaptiveAvgPool3dBackward(gradInput *Tensor, gradOutput *Tensor lib.AtgAdaptiveAvgPool3dBackward(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveAvgPool3dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveAvgPool3dBackward") return retVal, err } @@ -7786,9 +8218,10 @@ func(ts *Tensor) AdaptiveAvgPool3dOut(out *Tensor, 
outputSize []int64, del bool) outputSizeLen := len(outputSize) lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveAvgPool3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveAvgPool3dOut") return retVal, err } @@ -7803,10 +8236,11 @@ func(ts *Tensor) AdaptiveMaxPool1d(outputSize []int64, del bool)(retVal0 *Tensor outputSizeLen := len(outputSize) lib.AtgAdaptiveMaxPool1d(ctensorPtr0, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool1d() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "AdaptiveMaxPool1d_0") + retVal1 = newTensor(*ctensorPtr1, "AdaptiveMaxPool1d_1") return retVal0, retVal1, err } @@ -7821,10 +8255,11 @@ func(ts *Tensor) AdaptiveMaxPool2d(outputSize []int64, del bool)(retVal0 *Tensor outputSizeLen := len(outputSize) lib.AtgAdaptiveMaxPool2d(ctensorPtr0, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool2d() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "AdaptiveMaxPool2d_0") + retVal1 = newTensor(*ctensorPtr1, "AdaptiveMaxPool2d_1") return retVal0, retVal1, err } @@ -7838,9 +8273,10 @@ func(ts *Tensor) AdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveMaxPool2dBackward") return retVal, err } @@ -7854,9 +8290,10 @@ func(ts *Tensor) AdaptiveMaxPool2dBackwardGradInput(gradInput *Tensor, gradOutpu lib.AtgAdaptiveMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveMaxPool2dBackwardGradInput") return retVal, err } @@ -7871,10 +8308,11 @@ func(ts *Tensor) AdaptiveMaxPool2dOut(out *Tensor, indices *Tensor, outputSize [ outputSizeLen := len(outputSize) lib.AtgAdaptiveMaxPool2dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool2dOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "AdaptiveMaxPool2dOut_0") + retVal1 = newTensor(*ctensorPtr1, "AdaptiveMaxPool2dOut_1") return retVal0, retVal1, err } @@ -7889,10 +8327,11 @@ func(ts *Tensor) AdaptiveMaxPool3d(outputSize []int64, del bool)(retVal0 *Tensor outputSizeLen := len(outputSize) lib.AtgAdaptiveMaxPool3d(ctensorPtr0, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool3d() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "AdaptiveMaxPool3d_0") + retVal1 = newTensor(*ctensorPtr1, 
"AdaptiveMaxPool3d_1") return retVal0, retVal1, err } @@ -7906,9 +8345,10 @@ func(ts *Tensor) AdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool3dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveMaxPool3dBackward") return retVal, err } @@ -7922,9 +8362,10 @@ func(ts *Tensor) AdaptiveMaxPool3dBackwardGradInput(gradInput *Tensor, gradOutpu lib.AtgAdaptiveMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool3dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AdaptiveMaxPool3dBackwardGradInput") return retVal, err } @@ -7939,10 +8380,11 @@ func(ts *Tensor) AdaptiveMaxPool3dOut(out *Tensor, indices *Tensor, outputSize [ outputSizeLen := len(outputSize) lib.AtgAdaptiveMaxPool3dOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("AdaptiveMaxPool3dOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "AdaptiveMaxPool3dOut_0") + retVal1 = newTensor(*ctensorPtr1, "AdaptiveMaxPool3dOut_1") return retVal0, retVal1, err } @@ -7956,9 +8398,10 @@ func(ts *Tensor) Add(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAdd(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Add() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Add") return retVal, err } @@ -7971,6 +8414,7 @@ func(ts *Tensor) Add_(other *Tensor)(err error) { lib.AtgAdd_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Add_() failed: %w", err) return err } ts.ctensor = *ptr @@ -7987,9 +8431,10 @@ func(ts *Tensor) AddOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, er lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddOut") return retVal, err } @@ -8003,9 +8448,10 @@ func(ts *Tensor) AddScalar(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgAddScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddScalar") return retVal, err } @@ -8018,6 +8464,7 @@ func(ts *Tensor) AddScalar_(other *Scalar)(err error) { lib.AtgAddScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8034,9 +8481,10 @@ func(ts *Tensor) AddScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tens lib.AtgAddScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddScalarOut") return retVal, err } @@ -8050,9 +8498,10 @@ func(ts *Tensor) Addbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor 
lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addbmm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Addbmm") return retVal, err } @@ -8065,6 +8514,7 @@ func(ts *Tensor) Addbmm_(batch1 *Tensor, batch2 *Tensor)(err error) { lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addbmm_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8081,9 +8531,10 @@ func(ts *Tensor) AddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddbmmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddbmmOut") return retVal, err } @@ -8097,9 +8548,10 @@ func(ts *Tensor) Addcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Ten lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addcdiv() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Addcdiv") return retVal, err } @@ -8112,6 +8564,7 @@ func(ts *Tensor) Addcdiv_(tensor1 *Tensor, tensor2 *Tensor)(err error) { lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addcdiv_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8128,9 +8581,10 @@ func(ts *Tensor) AddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del b lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddcdivOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddcdivOut") return retVal, err } @@ -8144,9 +8598,10 @@ func(ts *Tensor) Addcmul(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Ten lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addcmul() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Addcmul") return retVal, err } @@ -8159,6 +8614,7 @@ func(ts *Tensor) Addcmul_(tensor1 *Tensor, tensor2 *Tensor)(err error) { lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addcmul_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8175,9 +8631,10 @@ func(ts *Tensor) AddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del b lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddcmulOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddcmulOut") return retVal, err } @@ -8191,9 +8648,10 @@ func(ts *Tensor) Addmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addmm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Addmm") return retVal, err } @@ -8206,6 +8664,7 @@ func(ts *Tensor) Addmm_(mat1 *Tensor, mat2 *Tensor)(err error) { lib.AtgAddmm_(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addmm_() failed: %w", err) 
return err } ts.ctensor = *ptr @@ -8222,9 +8681,10 @@ func(ts *Tensor) AddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(ret lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddmmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddmmOut") return retVal, err } @@ -8238,9 +8698,10 @@ func(ts *Tensor) Addmv(mat *Tensor, vec *Tensor, del bool)(retVal *Tensor, err e lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addmv() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Addmv") return retVal, err } @@ -8253,6 +8714,7 @@ func(ts *Tensor) Addmv_(mat *Tensor, vec *Tensor)(err error) { lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addmv_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8269,9 +8731,10 @@ func(ts *Tensor) AddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool)(retVa lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddmvOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddmvOut") return retVal, err } @@ -8285,9 +8748,10 @@ func(ts *Tensor) Addr(vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Addr") return retVal, err } @@ -8300,6 +8764,7 @@ func(ts *Tensor) Addr_(vec1 *Tensor, vec2 *Tensor)(err error) { lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Addr_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8316,9 +8781,10 @@ func(ts *Tensor) AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)(retV lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AddrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AddrOut") return retVal, err } @@ -8332,9 +8798,10 @@ func(ts *Tensor) Adjoint(del bool)(retVal *Tensor, err error) { lib.AtgAdjoint(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Adjoint() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Adjoint") return retVal, err } @@ -8350,9 +8817,10 @@ calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, sizeLen, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("AffineGridGenerator() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AffineGridGenerator") return retVal, err } @@ -8368,9 +8836,10 @@ calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, sizeLen, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("AffineGridGeneratorBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AffineGridGeneratorBackward") return retVal, err } @@ -8386,9 +8855,10 @@ calignCorners := int32(0) if alignCorners { 
calignCorners = int32(1) } lib.AtgAffineGridGeneratorOut(ptr, out.ctensor, theta.ctensor, size, sizeLen, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("AffineGridGeneratorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AffineGridGeneratorOut") return retVal, err } @@ -8402,9 +8872,10 @@ func(ts *Tensor) Alias(del bool)(retVal *Tensor, err error) { lib.AtgAlias(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Alias() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Alias") return retVal, err } @@ -8418,9 +8889,10 @@ func(ts *Tensor) AliasCopy(del bool)(retVal *Tensor, err error) { lib.AtgAliasCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AliasCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AliasCopy") return retVal, err } @@ -8434,9 +8906,10 @@ func(ts *Tensor) AliasCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgAliasCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AliasCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AliasCopyOut") return retVal, err } @@ -8450,9 +8923,10 @@ func(ts *Tensor) AlignAs(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AlignAs() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AlignAs") return retVal, err } @@ -8466,9 +8940,10 @@ func(ts *Tensor) All(del bool)(retVal *Tensor, err error) { lib.AtgAll(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("All() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "All") return retVal, err } @@ -8482,9 +8957,10 @@ func(ts *Tensor) AllAllOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAllAllOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AllAllOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AllAllOut") return retVal, err } @@ -8500,9 +8976,10 @@ func(ts *Tensor) AllDim(dim int64, keepdim bool, del bool)(retVal *Tensor, err e if keepdim { ckeepdim = int32(1) } lib.AtgAllDim(ptr, ts.ctensor, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("AllDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AllDim") return retVal, err } @@ -8518,9 +8995,10 @@ func(ts *Tensor) AllOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal * if keepdim { ckeepdim = int32(1) } lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("AllOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AllOut") return retVal, err } @@ -8534,6 +9012,7 @@ func(ts *Tensor) Allclose(other *Tensor, rtol float64, atol float64, equalNan bo if equalNan { cequalNan = int32(1) } retVal = lib.AtgAllclose(ts.ctensor, other.ctensor, rtol, atol, cequalNan) if err = TorchErr(); err != nil { + err = fmt.Errorf("Allclose() failed: %w", err) return retVal, err } return retVal, err @@ -8549,9 +9028,10 @@ func AlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err erro if train { 
ctrain = int32(1) } lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("AlphaDropout() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AlphaDropout") return retVal, err } @@ -8566,6 +9046,7 @@ func(ts *Tensor) AlphaDropout_(p float64, train bool)(err error) { if train { ctrain = int32(1) } lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("AlphaDropout_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8585,9 +9066,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgAmax(ptr, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Amax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Amax") return retVal, err } @@ -8604,9 +9086,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgAmaxOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("AmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AmaxOut") return retVal, err } @@ -8623,9 +9106,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgAmin(ptr, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Amin() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Amin") return retVal, err } @@ -8642,9 +9126,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgAminOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("AminOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AminOut") return retVal, err } @@ -8666,10 +9151,11 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgAminmax(ctensorPtr0, ts.ctensor, cdimVal, cdimNull, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Aminmax() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Aminmax_0") + retVal1 = newTensor(*ctensorPtr1, "Aminmax_1") return retVal0, retVal1, err } @@ -8691,10 +9177,11 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgAminmaxOut(ctensorPtr0, min.ctensor, max.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("AminmaxOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "AminmaxOut_0") + retVal1 = newTensor(*ctensorPtr1, "AminmaxOut_1") return retVal0, retVal1, err } @@ -8708,9 +9195,10 @@ func(ts *Tensor) Angle(del bool)(retVal *Tensor, err error) { lib.AtgAngle(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Angle() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Angle") return retVal, err } @@ -8724,9 +9212,10 @@ func(ts *Tensor) AngleOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AngleOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AngleOut") return retVal, err } @@ -8740,9 
+9229,10 @@ func(ts *Tensor) Any(del bool)(retVal *Tensor, err error) { lib.AtgAny(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Any() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Any") return retVal, err } @@ -8756,9 +9246,10 @@ func(ts *Tensor) AnyAllOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAnyAllOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AnyAllOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AnyAllOut") return retVal, err } @@ -8774,9 +9265,10 @@ func(ts *Tensor) AnyDim(dim int64, keepdim bool, del bool)(retVal *Tensor, err e if keepdim { ckeepdim = int32(1) } lib.AtgAnyDim(ptr, ts.ctensor, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("AnyDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AnyDim") return retVal, err } @@ -8792,9 +9284,10 @@ func(ts *Tensor) AnyOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal * if keepdim { ckeepdim = int32(1) } lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("AnyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AnyOut") return retVal, err } @@ -8807,9 +9300,10 @@ func Arange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(re lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arange() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arange") return retVal, err } @@ -8822,9 +9316,10 @@ func ArangeStart(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDev lib.AtgArangeStart(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArangeStart() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArangeStart") return retVal, err } @@ -8837,9 +9332,10 @@ func ArangeStartStep(start *Scalar, end *Scalar, optionsKind gotch.DType, option lib.AtgArangeStartStep(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArangeStartStep() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArangeStartStep") return retVal, err } @@ -8853,9 +9349,10 @@ func(ts *Tensor) Arccos(del bool)(retVal *Tensor, err error) { lib.AtgArccos(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arccos() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arccos") return retVal, err } @@ -8868,6 +9365,7 @@ func(ts *Tensor) Arccos_()(err error) { lib.AtgArccos_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arccos_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8884,9 +9382,10 @@ func(ts *Tensor) ArccosOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgArccosOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArccosOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArccosOut") return retVal, err } @@ -8900,9 +9399,10 @@ func(ts *Tensor) Arccosh(del bool)(retVal *Tensor, err error) { 
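// NOTE (editor): newTensor itself is defined outside this diff, so the
// following is only a minimal sketch of what such a constructor could look
// like, assuming (per the patch subject) that it registers the wrapper with
// the Go GC via runtime.SetFinalizer. The `name` field and the freeCTensor
// helper are hypothetical, for illustration only; the second argument at the
// call sites presumably exists so a leaked tensor can be traced back to the
// API that created it.
//
//	func newTensor(ctensor lib.Ctensor, name string) *Tensor {
//		x := &Tensor{ctensor: ctensor} // `name` would be stored somewhere here
//		runtime.SetFinalizer(x, func(t *Tensor) {
//			freeCTensor(t) // hypothetical helper: release the libtorch handle
//		})
//		return x
//	}
//
// A finalizer only runs when the Go GC happens to collect the wrapper, and
// the GC cannot see the C-side memory behind it, so explicit cleanup (e.g.
// the existing `del` flag) presumably remains the reliable path.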
lib.AtgArccosh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arccosh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arccosh") return retVal, err } @@ -8915,6 +9415,7 @@ func(ts *Tensor) Arccosh_()(err error) { lib.AtgArccosh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arccosh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8931,9 +9432,10 @@ func(ts *Tensor) ArccoshOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgArccoshOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArccoshOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArccoshOut") return retVal, err } @@ -8947,9 +9449,10 @@ func(ts *Tensor) Arcsin(del bool)(retVal *Tensor, err error) { lib.AtgArcsin(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arcsin() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arcsin") return retVal, err } @@ -8962,6 +9465,7 @@ func(ts *Tensor) Arcsin_()(err error) { lib.AtgArcsin_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arcsin_() failed: %w", err) return err } ts.ctensor = *ptr @@ -8978,9 +9482,10 @@ func(ts *Tensor) ArcsinOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgArcsinOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArcsinOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArcsinOut") return retVal, err } @@ -8994,9 +9499,10 @@ func(ts *Tensor) Arcsinh(del bool)(retVal *Tensor, err error) { lib.AtgArcsinh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arcsinh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arcsinh") return retVal, err } @@ -9009,6 +9515,7 @@ func(ts *Tensor) Arcsinh_()(err error) { lib.AtgArcsinh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arcsinh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9025,9 +9532,10 @@ func(ts *Tensor) ArcsinhOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgArcsinhOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArcsinhOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArcsinhOut") return retVal, err } @@ -9041,9 +9549,10 @@ func(ts *Tensor) Arctan(del bool)(retVal *Tensor, err error) { lib.AtgArctan(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arctan() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arctan") return retVal, err } @@ -9057,9 +9566,10 @@ func(ts *Tensor) Arctan2(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgArctan2(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arctan2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arctan2") return retVal, err } @@ -9072,6 +9582,7 @@ func(ts *Tensor) Arctan2_(other *Tensor)(err error) { lib.AtgArctan2_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arctan2_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9088,9 +9599,10 @@ func(ts *Tensor) Arctan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor 
lib.AtgArctan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arctan2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arctan2Out") return retVal, err } @@ -9103,6 +9615,7 @@ func(ts *Tensor) Arctan_()(err error) { lib.AtgArctan_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arctan_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9119,9 +9632,10 @@ func(ts *Tensor) ArctanOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgArctanOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArctanOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArctanOut") return retVal, err } @@ -9135,9 +9649,10 @@ func(ts *Tensor) Arctanh(del bool)(retVal *Tensor, err error) { lib.AtgArctanh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arctanh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Arctanh") return retVal, err } @@ -9150,6 +9665,7 @@ func(ts *Tensor) Arctanh_()(err error) { lib.AtgArctanh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Arctanh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9166,9 +9682,10 @@ func(ts *Tensor) ArctanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgArctanhOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArctanhOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArctanhOut") return retVal, err } @@ -9190,9 +9707,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgArgmax(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Argmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Argmax") return retVal, err } @@ -9214,9 +9732,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgArgmaxOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArgmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArgmaxOut") return retVal, err } @@ -9238,9 +9757,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgArgmin(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Argmin() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Argmin") return retVal, err } @@ -9262,9 +9782,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgArgminOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArgminOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArgminOut") return retVal, err } @@ -9280,9 +9801,10 @@ func(ts *Tensor) Argsort(dim int64, descending bool, del bool)(retVal *Tensor, e if descending { cdescending = int32(1) } lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending) if err = TorchErr(); err != nil { + err = fmt.Errorf("Argsort() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Argsort") return retVal, err } @@ -9300,9 +9822,10 @@ cdescending := int32(0) if descending { cdescending = int32(1) } 
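// NOTE (editor): because every wrapper now wraps the libtorch error with %w,
// callers can match on the underlying error with the standard errors package.
// A self-contained sketch — the sentinel below is a stand-in for whatever
// error value TorchErr() actually returns:
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//	)
//
//	var errTorch = errors.New("libtorch error") // stand-in for TorchErr()'s error
//
//	func argsortStable() error {
//		// same shape as the wrapping generated by this patch
//		return fmt.Errorf("ArgsortStable() failed: %w", errTorch)
//	}
//
//	func main() {
//		err := argsortStable()
//		fmt.Println(errors.Is(err, errTorch)) // true: %w keeps the chain intact
//	}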
lib.AtgArgsortStable(ptr, ts.ctensor, cstable, dim, cdescending) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArgsortStable() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArgsortStable") return retVal, err } @@ -9320,9 +9843,10 @@ cdescending := int32(0) if descending { cdescending = int32(1) } lib.AtgArgsortStableOut(ptr, out.ctensor, ts.ctensor, cstable, dim, cdescending) if err = TorchErr(); err != nil { + err = fmt.Errorf("ArgsortStableOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ArgsortStableOut") return retVal, err } @@ -9336,9 +9860,10 @@ func(ts *Tensor) Argwhere(del bool)(retVal *Tensor, err error) { lib.AtgArgwhere(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Argwhere() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Argwhere") return retVal, err } @@ -9360,9 +9885,10 @@ var cstorageOffsetVal int64 = 0 } lib.AtgAsStrided(ptr, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsStrided() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AsStrided") return retVal, err } @@ -9383,6 +9909,7 @@ var cstorageOffsetVal int64 = 0 } lib.AtgAsStrided_(ptr, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsStrided_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9407,9 +9934,10 @@ var cstorageOffsetVal int64 = 0 } lib.AtgAsStridedCopy(ptr, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsStridedCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AsStridedCopy") return retVal, err } @@ -9431,9 +9959,10 @@ var cstorageOffsetVal int64 = 0 } lib.AtgAsStridedCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsStridedCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AsStridedCopyOut") return retVal, err } @@ -9455,9 +9984,10 @@ var cstorageOffsetVal int64 = 0 } lib.AtgAsStridedScatter(ptr, ts.ctensor, src.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsStridedScatter() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AsStridedScatter") return retVal, err } @@ -9479,9 +10009,10 @@ var cstorageOffsetVal int64 = 0 } lib.AtgAsStridedScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, size, sizeLen, stride, strideLen, cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsStridedScatterOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AsStridedScatterOut") return retVal, err } @@ -9495,9 +10026,10 @@ func(ts *Tensor) Asin(del bool)(retVal *Tensor, err error) { lib.AtgAsin(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Asin() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Asin") return retVal, err } @@ -9510,6 +10042,7 @@ func(ts 
*Tensor) Asin_()(err error) { lib.AtgAsin_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Asin_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9526,9 +10059,10 @@ func(ts *Tensor) AsinOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsinOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AsinOut") return retVal, err } @@ -9542,9 +10076,10 @@ func(ts *Tensor) Asinh(del bool)(retVal *Tensor, err error) { lib.AtgAsinh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Asinh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Asinh") return retVal, err } @@ -9557,6 +10092,7 @@ func(ts *Tensor) Asinh_()(err error) { lib.AtgAsinh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Asinh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9573,9 +10109,10 @@ func(ts *Tensor) AsinhOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAsinhOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AsinhOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AsinhOut") return retVal, err } @@ -9589,9 +10126,10 @@ func(ts *Tensor) Atan(del bool)(retVal *Tensor, err error) { lib.AtgAtan(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atan() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Atan") return retVal, err } @@ -9605,9 +10143,10 @@ func(ts *Tensor) Atan2(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAtan2(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atan2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Atan2") return retVal, err } @@ -9620,6 +10159,7 @@ func(ts *Tensor) Atan2_(other *Tensor)(err error) { lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atan2_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9636,9 +10176,10 @@ func(ts *Tensor) Atan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor, lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atan2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Atan2Out") return retVal, err } @@ -9651,6 +10192,7 @@ func(ts *Tensor) Atan_()(err error) { lib.AtgAtan_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atan_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9667,9 +10209,10 @@ func(ts *Tensor) AtanOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AtanOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AtanOut") return retVal, err } @@ -9683,9 +10226,10 @@ func(ts *Tensor) Atanh(del bool)(retVal *Tensor, err error) { lib.AtgAtanh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atanh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Atanh") return retVal, err } @@ -9698,6 +10242,7 @@ func(ts *Tensor) Atanh_()(err error) { lib.AtgAtanh_(ptr, ts.ctensor) 
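// NOTE (editor): most allocating wrappers in this file take a trailing
// `del bool`. Its implementation is elided from the hunks shown here; by
// gotch convention it frees the receiver after the call, which is an
// assumption for the call-site sketch below, as are the IntScalar helper and
// the log import.
//
//	end := IntScalar(10) // assumed scalar constructor from the ts package
//	t, err := Arange(end, gotch.Float, gotch.CPU)
//	if err != nil {
//		log.Fatal(err) // prints e.g. "Arange() failed: <libtorch message>"
//	}
//	u, err := t.Asinh(true) // del=true: t is assumed freed after the call
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = u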
if err = TorchErr(); err != nil { + err = fmt.Errorf("Atanh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -9714,9 +10259,10 @@ func(ts *Tensor) AtanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgAtanhOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("AtanhOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AtanhOut") return retVal, err } @@ -9730,9 +10276,10 @@ func(ts *Tensor) Atleast1d(del bool)(retVal *Tensor, err error) { lib.AtgAtleast1d(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atleast1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Atleast1d") return retVal, err } @@ -9746,9 +10293,10 @@ func(ts *Tensor) Atleast2d(del bool)(retVal *Tensor, err error) { lib.AtgAtleast2d(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atleast2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Atleast2d") return retVal, err } @@ -9762,9 +10310,10 @@ func(ts *Tensor) Atleast3d(del bool)(retVal *Tensor, err error) { lib.AtgAtleast3d(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Atleast3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Atleast3d") return retVal, err } @@ -9785,9 +10334,10 @@ ccountIncludePad := int32(0) if countIncludePad { ccountIncludePad = int32(1) } lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool1d") return retVal, err } @@ -9814,9 +10364,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool2d") return retVal, err } @@ -9843,9 +10394,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool2dBackward") return retVal, err } @@ -9872,9 +10424,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool2dBackwardGradInput") return retVal, err } @@ -9901,9 +10454,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("AvgPool2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool2dOut") return retVal, err } @@ -9930,9 +10484,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool3d") return retVal, err } @@ -9959,9 +10514,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool3dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool3dBackward") return retVal, err } @@ -9988,9 +10544,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool3dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool3dBackwardGradInput") return retVal, err } @@ -10017,9 +10574,10 @@ var cdivisorOverrideVal int64 = 0 } lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("AvgPool3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "AvgPool3dOut") return retVal, err } @@ -10033,9 +10591,10 @@ func(ts *Tensor) Baddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tenso lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Baddbmm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Baddbmm") return retVal, err } @@ -10048,6 +10607,7 @@ func(ts *Tensor) Baddbmm_(batch1 *Tensor, batch2 *Tensor)(err error) { lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Baddbmm_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10064,9 +10624,10 @@ func(ts *Tensor) BaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del boo lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BaddbmmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BaddbmmOut") return retVal, err } @@ -10079,9 +10640,10 @@ func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice g lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("BartlettWindow() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BartlettWindow") return retVal, err } @@ -10094,9 +10656,10 @@ func 
BartlettWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err erro lib.AtgBartlettWindowOut(ptr, out.ctensor, windowLength) if err = TorchErr(); err != nil { + err = fmt.Errorf("BartlettWindowOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BartlettWindowOut") return retVal, err } @@ -10111,9 +10674,10 @@ func BartlettWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch if periodic { cperiodic = int32(1) } lib.AtgBartlettWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("BartlettWindowPeriodic() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BartlettWindowPeriodic") return retVal, err } @@ -10128,9 +10692,10 @@ func BartlettWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(r if periodic { cperiodic = int32(1) } lib.AtgBartlettWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic) if err = TorchErr(); err != nil { + err = fmt.Errorf("BartlettWindowPeriodicOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BartlettWindowPeriodicOut") return retVal, err } @@ -10147,9 +10712,10 @@ ccudnnEnabled := int32(0) if cudnnEnabled { ccudnnEnabled = int32(1) } lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BatchNorm") return retVal, err } @@ -10162,9 +10728,10 @@ func BatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor, count.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormBackwardElemt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BatchNormBackwardElemt") return retVal, err } @@ -10177,9 +10744,10 @@ func BatchNormBackwardElemtOut(out *Tensor, gradOut *Tensor, input *Tensor, mean lib.AtgBatchNormBackwardElemtOut(ptr, out.ctensor, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor, count.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormBackwardElemtOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BatchNormBackwardElemtOut") return retVal, err } @@ -10200,12 +10768,13 @@ cbiasG := int32(0) if biasG { cbiasG = int32(1) } lib.AtgBatchNormBackwardReduce(ctensorPtr0, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, cinputG, cweightG, cbiasG) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormBackwardReduce() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "BatchNormBackwardReduce_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormBackwardReduce_1") + retVal2 = newTensor(*ctensorPtr2, "BatchNormBackwardReduce_2") + retVal3 = newTensor(*ctensorPtr3, "BatchNormBackwardReduce_3") return retVal0, 
retVal1, retVal2, retVal3, err } @@ -10226,12 +10795,13 @@ cbiasG := int32(0) if biasG { cbiasG = int32(1) } lib.AtgBatchNormBackwardReduceOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, cinputG, cweightG, cbiasG) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormBackwardReduceOut() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "BatchNormBackwardReduceOut_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormBackwardReduceOut_1") + retVal2 = newTensor(*ctensorPtr2, "BatchNormBackwardReduceOut_2") + retVal3 = newTensor(*ctensorPtr3, "BatchNormBackwardReduceOut_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -10244,9 +10814,10 @@ func BatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, i lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormElemt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BatchNormElemt") return retVal, err } @@ -10259,9 +10830,10 @@ func BatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormElemtOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BatchNormElemtOut") return retVal, err } @@ -10274,10 +10846,11 @@ func BatchNormGatherStats(input *Tensor, mean *Tensor, invstd *Tensor, runningMe lib.AtgBatchNormGatherStats(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, count) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormGatherStats() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormGatherStats_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormGatherStats_1") return retVal0, retVal1, err } @@ -10290,10 +10863,11 @@ func BatchNormGatherStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, mean *Te lib.AtgBatchNormGatherStatsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, count) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormGatherStatsOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormGatherStatsOut_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormGatherStatsOut_1") return retVal0, retVal1, err } @@ -10306,10 +10880,11 @@ func BatchNormGatherStatsWithCounts(input *Tensor, mean *Tensor, invstd *Tensor, lib.AtgBatchNormGatherStatsWithCounts(ctensorPtr0, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, counts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormGatherStatsWithCounts() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: 
*ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormGatherStatsWithCounts_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormGatherStatsWithCounts_1") return retVal0, retVal1, err } @@ -10322,10 +10897,11 @@ func BatchNormGatherStatsWithCountsOut(out0 *Tensor, out1 *Tensor, input *Tensor lib.AtgBatchNormGatherStatsWithCountsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, runningMean.ctensor, runningVar.ctensor, momentum, eps, counts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormGatherStatsWithCountsOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormGatherStatsWithCountsOut_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormGatherStatsWithCountsOut_1") return retVal0, retVal1, err } @@ -10338,10 +10914,11 @@ func BatchNormStats(input *Tensor, eps float64)(retVal0 *Tensor, retVal1 *Tensor lib.AtgBatchNormStats(ctensorPtr0, input.ctensor, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormStats() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormStats_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormStats_1") return retVal0, retVal1, err } @@ -10354,10 +10931,11 @@ func BatchNormStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, eps float64)(r lib.AtgBatchNormStatsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormStatsOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormStatsOut_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormStatsOut_1") return retVal0, retVal1, err } @@ -10370,10 +10948,11 @@ func BatchNormUpdateStats(input *Tensor, runningMean *Tensor, runningVar *Tensor lib.AtgBatchNormUpdateStats(ctensorPtr0, input.ctensor, runningMean.ctensor, runningVar.ctensor, momentum) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormUpdateStats() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormUpdateStats_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormUpdateStats_1") return retVal0, retVal1, err } @@ -10386,10 +10965,11 @@ func BatchNormUpdateStatsOut(out0 *Tensor, out1 *Tensor, input *Tensor, runningM lib.AtgBatchNormUpdateStatsOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, runningMean.ctensor, runningVar.ctensor, momentum) if err = TorchErr(); err != nil { + err = fmt.Errorf("BatchNormUpdateStatsOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "BatchNormUpdateStatsOut_0") + retVal1 = newTensor(*ctensorPtr1, "BatchNormUpdateStatsOut_1") return retVal0, retVal1, err } @@ -10403,9 +10983,10 @@ func(ts *Tensor) Bernoulli(del bool)(retVal *Tensor, err error) { lib.AtgBernoulli(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Bernoulli() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Bernoulli") return retVal, err } @@ -10418,6 +10999,7 
@@ func(ts *Tensor) Bernoulli_(p *Tensor)(err error) { lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Bernoulli_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10433,6 +11015,7 @@ func(ts *Tensor) BernoulliFloat_(p float64)(err error) { lib.AtgBernoulliFloat_(ptr, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("BernoulliFloat_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10449,9 +11032,10 @@ func(ts *Tensor) BernoulliP(p float64, del bool)(retVal *Tensor, err error) { lib.AtgBernoulliP(ptr, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("BernoulliP() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BernoulliP") return retVal, err } @@ -10465,9 +11049,10 @@ func(ts *Tensor) BernoulliTensor(p *Tensor, del bool)(retVal *Tensor, err error) lib.AtgBernoulliTensor(ptr, ts.ctensor, p.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BernoulliTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BernoulliTensor") return retVal, err } @@ -10480,9 +11065,10 @@ func Bilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor)(retV lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Bilinear() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Bilinear") return retVal, err } @@ -10496,9 +11082,10 @@ func(ts *Tensor) BinaryCrossEntropy(target *Tensor, weight *Tensor, reduction in lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("BinaryCrossEntropy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BinaryCrossEntropy") return retVal, err } @@ -10512,9 +11099,10 @@ func(ts *Tensor) BinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("BinaryCrossEntropyBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BinaryCrossEntropyBackward") return retVal, err } @@ -10528,9 +11116,10 @@ func(ts *Tensor) BinaryCrossEntropyBackwardGradInput(gradInput *Tensor, gradOutp lib.AtgBinaryCrossEntropyBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("BinaryCrossEntropyBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BinaryCrossEntropyBackwardGradInput") return retVal, err } @@ -10544,9 +11133,10 @@ func(ts *Tensor) BinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tens lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("BinaryCrossEntropyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BinaryCrossEntropyOut") return retVal, err } @@ -10560,9 +11150,10 @@ func(ts *Tensor) BinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, po lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, 
posWeight.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("BinaryCrossEntropyWithLogits() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BinaryCrossEntropyWithLogits") return retVal, err } @@ -10576,9 +11167,10 @@ func(ts *Tensor) BinaryCrossEntropyWithLogitsOut(out *Tensor, target *Tensor, we lib.AtgBinaryCrossEntropyWithLogitsOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("BinaryCrossEntropyWithLogitsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BinaryCrossEntropyWithLogitsOut") return retVal, err } @@ -10592,9 +11184,10 @@ func(ts *Tensor) Bincount(weights *Tensor, minlength int64, del bool)(retVal *Te lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength) if err = TorchErr(); err != nil { + err = fmt.Errorf("Bincount() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Bincount") return retVal, err } @@ -10608,9 +11201,10 @@ func(ts *Tensor) BincountOut(out *Tensor, weights *Tensor, minlength int64, del lib.AtgBincountOut(ptr, out.ctensor, ts.ctensor, weights.ctensor, minlength) if err = TorchErr(); err != nil { + err = fmt.Errorf("BincountOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BincountOut") return retVal, err } @@ -10623,9 +11217,10 @@ func Binomial(count *Tensor, prob *Tensor)(retVal *Tensor, err error) { lib.AtgBinomial(ptr, count.ctensor, prob.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Binomial() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Binomial") return retVal, err } @@ -10638,9 +11233,10 @@ func BinomialOut(out *Tensor, count *Tensor, prob *Tensor)(retVal *Tensor, err e lib.AtgBinomialOut(ptr, out.ctensor, count.ctensor, prob.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BinomialOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BinomialOut") return retVal, err } @@ -10654,9 +11250,10 @@ func(ts *Tensor) BitwiseAnd(other *Scalar, del bool)(retVal *Tensor, err error) lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAnd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseAnd") return retVal, err } @@ -10669,6 +11266,7 @@ func(ts *Tensor) BitwiseAnd_(other *Scalar)(err error) { lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAnd_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10685,9 +11283,10 @@ func(ts *Tensor) BitwiseAndScalarOut(out *Tensor, other *Scalar, del bool)(retVa lib.AtgBitwiseAndScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAndScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseAndScalarOut") return retVal, err } @@ -10700,9 +11299,10 @@ func BitwiseAndScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, e lib.AtgBitwiseAndScalarTensor(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAndScalarTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + 
retVal = newTensor(*ptr, "BitwiseAndScalarTensor") return retVal, err } @@ -10715,9 +11315,10 @@ func BitwiseAndScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(r lib.AtgBitwiseAndScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAndScalarTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseAndScalarTensorOut") return retVal, err } @@ -10731,9 +11332,10 @@ func(ts *Tensor) BitwiseAndTensor(other *Tensor, del bool)(retVal *Tensor, err e lib.AtgBitwiseAndTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAndTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseAndTensor") return retVal, err } @@ -10746,6 +11348,7 @@ func(ts *Tensor) BitwiseAndTensor_(other *Tensor)(err error) { lib.AtgBitwiseAndTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAndTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10762,9 +11365,10 @@ func(ts *Tensor) BitwiseAndTensorOut(out *Tensor, other *Tensor, del bool)(retVa lib.AtgBitwiseAndTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseAndTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseAndTensorOut") return retVal, err } @@ -10778,9 +11382,10 @@ func(ts *Tensor) BitwiseLeftShift(other *Tensor, del bool)(retVal *Tensor, err e lib.AtgBitwiseLeftShift(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShift() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseLeftShift") return retVal, err } @@ -10793,6 +11398,7 @@ func(ts *Tensor) BitwiseLeftShift_(other *Tensor)(err error) { lib.AtgBitwiseLeftShift_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShift_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10808,9 +11414,10 @@ func BitwiseLeftShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Ten lib.AtgBitwiseLeftShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShiftScalarTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseLeftShiftScalarTensor") return retVal, err } @@ -10823,9 +11430,10 @@ func BitwiseLeftShiftScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Ten lib.AtgBitwiseLeftShiftScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShiftScalarTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseLeftShiftScalarTensorOut") return retVal, err } @@ -10839,9 +11447,10 @@ func(ts *Tensor) BitwiseLeftShiftTensorOut(out *Tensor, other *Tensor, del bool) lib.AtgBitwiseLeftShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShiftTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseLeftShiftTensorOut") return retVal, err } @@ -10855,9 +11464,10 @@ func(ts *Tensor) BitwiseLeftShiftTensorScalar(other *Scalar, del 
bool)(retVal *T lib.AtgBitwiseLeftShiftTensorScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShiftTensorScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseLeftShiftTensorScalar") return retVal, err } @@ -10870,6 +11480,7 @@ func(ts *Tensor) BitwiseLeftShiftTensorScalar_(other *Scalar)(err error) { lib.AtgBitwiseLeftShiftTensorScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShiftTensorScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10886,9 +11497,10 @@ func(ts *Tensor) BitwiseLeftShiftTensorScalarOut(out *Tensor, other *Scalar, del lib.AtgBitwiseLeftShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseLeftShiftTensorScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseLeftShiftTensorScalarOut") return retVal, err } @@ -10902,9 +11514,10 @@ func(ts *Tensor) BitwiseNot(del bool)(retVal *Tensor, err error) { lib.AtgBitwiseNot(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseNot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseNot") return retVal, err } @@ -10917,6 +11530,7 @@ func(ts *Tensor) BitwiseNot_()(err error) { lib.AtgBitwiseNot_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseNot_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10933,9 +11547,10 @@ func(ts *Tensor) BitwiseNotOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseNotOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseNotOut") return retVal, err } @@ -10949,9 +11564,10 @@ func(ts *Tensor) BitwiseOr(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseOr") return retVal, err } @@ -10964,6 +11580,7 @@ func(ts *Tensor) BitwiseOr_(other *Scalar)(err error) { lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOr_() failed: %w", err) return err } ts.ctensor = *ptr @@ -10980,9 +11597,10 @@ func(ts *Tensor) BitwiseOrScalarOut(out *Tensor, other *Scalar, del bool)(retVal lib.AtgBitwiseOrScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOrScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseOrScalarOut") return retVal, err } @@ -10995,9 +11613,10 @@ func BitwiseOrScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, er lib.AtgBitwiseOrScalarTensor(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOrScalarTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseOrScalarTensor") return retVal, err } @@ -11010,9 +11629,10 @@ func BitwiseOrScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(re lib.AtgBitwiseOrScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, 
other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOrScalarTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseOrScalarTensorOut") return retVal, err } @@ -11026,9 +11646,10 @@ func(ts *Tensor) BitwiseOrTensor(other *Tensor, del bool)(retVal *Tensor, err er lib.AtgBitwiseOrTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOrTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseOrTensor") return retVal, err } @@ -11041,6 +11662,7 @@ func(ts *Tensor) BitwiseOrTensor_(other *Tensor)(err error) { lib.AtgBitwiseOrTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOrTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11057,9 +11679,10 @@ func(ts *Tensor) BitwiseOrTensorOut(out *Tensor, other *Tensor, del bool)(retVal lib.AtgBitwiseOrTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseOrTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseOrTensorOut") return retVal, err } @@ -11073,9 +11696,10 @@ func(ts *Tensor) BitwiseRightShift(other *Tensor, del bool)(retVal *Tensor, err lib.AtgBitwiseRightShift(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShift() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseRightShift") return retVal, err } @@ -11088,6 +11712,7 @@ func(ts *Tensor) BitwiseRightShift_(other *Tensor)(err error) { lib.AtgBitwiseRightShift_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShift_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11103,9 +11728,10 @@ func BitwiseRightShiftScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Te lib.AtgBitwiseRightShiftScalarTensor(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShiftScalarTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseRightShiftScalarTensor") return retVal, err } @@ -11118,9 +11744,10 @@ func BitwiseRightShiftScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Te lib.AtgBitwiseRightShiftScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShiftScalarTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseRightShiftScalarTensorOut") return retVal, err } @@ -11134,9 +11761,10 @@ func(ts *Tensor) BitwiseRightShiftTensorOut(out *Tensor, other *Tensor, del bool lib.AtgBitwiseRightShiftTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShiftTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseRightShiftTensorOut") return retVal, err } @@ -11150,9 +11778,10 @@ func(ts *Tensor) BitwiseRightShiftTensorScalar(other *Scalar, del bool)(retVal * lib.AtgBitwiseRightShiftTensorScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShiftTensorScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal 
= newTensor(*ptr, "BitwiseRightShiftTensorScalar") return retVal, err } @@ -11165,6 +11794,7 @@ func(ts *Tensor) BitwiseRightShiftTensorScalar_(other *Scalar)(err error) { lib.AtgBitwiseRightShiftTensorScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShiftTensorScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11181,9 +11811,10 @@ func(ts *Tensor) BitwiseRightShiftTensorScalarOut(out *Tensor, other *Scalar, de lib.AtgBitwiseRightShiftTensorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseRightShiftTensorScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseRightShiftTensorScalarOut") return retVal, err } @@ -11197,9 +11828,10 @@ func(ts *Tensor) BitwiseXor(other *Scalar, del bool)(retVal *Tensor, err error) lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseXor") return retVal, err } @@ -11212,6 +11844,7 @@ func(ts *Tensor) BitwiseXor_(other *Scalar)(err error) { lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11228,9 +11861,10 @@ func(ts *Tensor) BitwiseXorScalarOut(out *Tensor, other *Scalar, del bool)(retVa lib.AtgBitwiseXorScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXorScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseXorScalarOut") return retVal, err } @@ -11243,9 +11877,10 @@ func BitwiseXorScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, e lib.AtgBitwiseXorScalarTensor(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXorScalarTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseXorScalarTensor") return retVal, err } @@ -11258,9 +11893,10 @@ func BitwiseXorScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(r lib.AtgBitwiseXorScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXorScalarTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseXorScalarTensorOut") return retVal, err } @@ -11274,9 +11910,10 @@ func(ts *Tensor) BitwiseXorTensor(other *Tensor, del bool)(retVal *Tensor, err e lib.AtgBitwiseXorTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXorTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseXorTensor") return retVal, err } @@ -11289,6 +11926,7 @@ func(ts *Tensor) BitwiseXorTensor_(other *Tensor)(err error) { lib.AtgBitwiseXorTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXorTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11305,9 +11943,10 @@ func(ts *Tensor) BitwiseXorTensorOut(out *Tensor, other *Tensor, del bool)(retVa lib.AtgBitwiseXorTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BitwiseXorTensorOut() failed: %w", 
err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BitwiseXorTensorOut") return retVal, err } @@ -11320,9 +11959,10 @@ func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice g lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("BlackmanWindow() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BlackmanWindow") return retVal, err } @@ -11335,9 +11975,10 @@ func BlackmanWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err erro lib.AtgBlackmanWindowOut(ptr, out.ctensor, windowLength) if err = TorchErr(); err != nil { + err = fmt.Errorf("BlackmanWindowOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BlackmanWindowOut") return retVal, err } @@ -11352,9 +11993,10 @@ func BlackmanWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch if periodic { cperiodic = int32(1) } lib.AtgBlackmanWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("BlackmanWindowPeriodic() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BlackmanWindowPeriodic") return retVal, err } @@ -11369,9 +12011,10 @@ func BlackmanWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(r if periodic { cperiodic = int32(1) } lib.AtgBlackmanWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic) if err = TorchErr(); err != nil { + err = fmt.Errorf("BlackmanWindowPeriodicOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BlackmanWindowPeriodicOut") return retVal, err } @@ -11386,9 +12029,10 @@ func BlockDiag(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgBlockDiag(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("BlockDiag() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BlockDiag") return retVal, err } @@ -11403,9 +12047,10 @@ func BlockDiagOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgBlockDiagOut(ptr, out.ctensor, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("BlockDiagOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BlockDiagOut") return retVal, err } @@ -11419,9 +12064,10 @@ func(ts *Tensor) Bmm(mat2 *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Bmm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Bmm") return retVal, err } @@ -11435,9 +12081,10 @@ func(ts *Tensor) BmmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("BmmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BmmOut") return retVal, err } @@ -11452,9 +12099,10 @@ func(ts *Tensor) BroadcastTo(size []int64, del bool)(retVal *Tensor, err error) sizeLen := len(size) lib.AtgBroadcastTo(ptr, ts.ctensor, size, sizeLen) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("BroadcastTo() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BroadcastTo") return retVal, err } @@ -11472,9 +12120,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgBucketize(ptr, ts.ctensor, boundaries.ctensor, coutInt32, cright) if err = TorchErr(); err != nil { + err = fmt.Errorf("Bucketize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Bucketize") return retVal, err } @@ -11491,9 +12140,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgBucketizeScalar(ptr, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright) if err = TorchErr(); err != nil { + err = fmt.Errorf("BucketizeScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BucketizeScalar") return retVal, err } @@ -11510,9 +12160,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgBucketizeScalarOut(ptr, out.ctensor, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright) if err = TorchErr(); err != nil { + err = fmt.Errorf("BucketizeScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BucketizeScalarOut") return retVal, err } @@ -11530,9 +12181,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgBucketizeTensorOut(ptr, out.ctensor, ts.ctensor, boundaries.ctensor, coutInt32, cright) if err = TorchErr(); err != nil { + err = fmt.Errorf("BucketizeTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "BucketizeTensorOut") return retVal, err } @@ -11543,6 +12195,7 @@ func CanCast(from gotch.DType, to gotch.DType)(retVal bool, err error) { retVal = lib.AtgCanCast(from.CInt(), to.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("CanCast() failed: %w", err) return retVal, err } return retVal, err @@ -11558,9 +12211,10 @@ func CartesianProd(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgCartesianProd(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("CartesianProd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CartesianProd") return retVal, err } @@ -11575,9 +12229,10 @@ func Cat(tensors []*Tensor, dim int64)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgCat(ptr, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cat() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cat") return retVal, err } @@ -11592,9 +12247,10 @@ func CatOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err error for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("CatOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CatOut") return retVal, err } @@ -11608,9 +12264,10 @@ func(ts *Tensor) Cauchy(median float64, sigma float64, del bool)(retVal *Tensor, lib.AtgCauchy(ptr, ts.ctensor, median, sigma) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cauchy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cauchy") return retVal, err } 
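(Usage note, not part of the patch.) The `%w` wrapping applied throughout these hunks means a caller can both see which generated binding failed and unwrap the underlying libtorch error. A minimal sketch using the `Cauchy` binding from the hunk above; `MustOnes`, `MustDrop`, and `MustSize` are the package's usual Must-style helpers and are assumed here rather than shown in this diff:

package main

import (
	"errors"
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	// Build an input tensor; MustOnes/MustDrop/MustSize are assumed helpers
	// from elsewhere in the ts package, not part of this diff.
	x := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
	defer x.MustDrop()

	// del = false: the generated flag that would otherwise drop x after the call.
	out, err := x.Cauchy(0.0, 1.0, false)
	if err != nil {
		// With this patch, err prints as "Cauchy() failed: <libtorch message>".
		fmt.Println("binding error:", err)
		fmt.Println("root cause:", errors.Unwrap(err))
		return
	}
	defer out.MustDrop()
	fmt.Println(out.MustSize()) // [2 3]
}

Because the wrapping uses `%w` rather than `%v`, `errors.Unwrap` (and `errors.Is`/`errors.As`) still reach the original TorchErr value; the binding name is prepended without losing the chain.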
@@ -11623,6 +12280,7 @@ func(ts *Tensor) Cauchy_(median float64, sigma float64)(err error) { lib.AtgCauchy_(ptr, ts.ctensor, median, sigma) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cauchy_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11639,9 +12297,10 @@ func(ts *Tensor) CauchyOut(out *Tensor, median float64, sigma float64, del bool) lib.AtgCauchyOut(ptr, out.ctensor, ts.ctensor, median, sigma) if err = TorchErr(); err != nil { + err = fmt.Errorf("CauchyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CauchyOut") return retVal, err } @@ -11655,9 +12314,10 @@ func(ts *Tensor) CcolIndices(del bool)(retVal *Tensor, err error) { lib.AtgCcolIndices(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CcolIndices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CcolIndices") return retVal, err } @@ -11671,9 +12331,10 @@ func(ts *Tensor) CcolIndicesCopy(del bool)(retVal *Tensor, err error) { lib.AtgCcolIndicesCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CcolIndicesCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CcolIndicesCopy") return retVal, err } @@ -11687,9 +12348,10 @@ func(ts *Tensor) CcolIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err e lib.AtgCcolIndicesCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CcolIndicesCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CcolIndicesCopyOut") return retVal, err } @@ -11708,9 +12370,10 @@ func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64)(retVal *Tenso } lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, ccomputeModeVal, ccomputeModeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cdist() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cdist") return retVal, err } @@ -11724,9 +12387,10 @@ func(ts *Tensor) Ceil(del bool)(retVal *Tensor, err error) { lib.AtgCeil(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ceil() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Ceil") return retVal, err } @@ -11739,6 +12403,7 @@ func(ts *Tensor) Ceil_()(err error) { lib.AtgCeil_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ceil_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11755,9 +12420,10 @@ func(ts *Tensor) CeilOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CeilOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CeilOut") return retVal, err } @@ -11771,9 +12437,10 @@ func(ts *Tensor) Celu(del bool)(retVal *Tensor, err error) { lib.AtgCelu(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Celu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Celu") return retVal, err } @@ -11786,6 +12453,7 @@ func(ts *Tensor) Celu_()(err error) { lib.AtgCelu_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Celu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -11802,9 +12470,10 @@ func(ts *Tensor) CeluOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgCeluOut(ptr, 
out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CeluOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CeluOut") return retVal, err } @@ -11819,9 +12488,10 @@ func ChainMatmul(matrices []*Tensor)(retVal *Tensor, err error) { for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)} lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices)) if err = TorchErr(); err != nil { + err = fmt.Errorf("ChainMatmul() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ChainMatmul") return retVal, err } @@ -11836,9 +12506,10 @@ func ChainMatmulOut(out *Tensor, matrices []*Tensor)(retVal *Tensor, err error) for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)} lib.AtgChainMatmulOut(ptr, out.ctensor, cmatrices, len(cmatrices)) if err = TorchErr(); err != nil { + err = fmt.Errorf("ChainMatmulOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ChainMatmulOut") return retVal, err } @@ -11852,9 +12523,10 @@ func(ts *Tensor) Chalf(del bool)(retVal *Tensor, err error) { lib.AtgChalf(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Chalf() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Chalf") return retVal, err } @@ -11868,9 +12540,10 @@ func(ts *Tensor) ChannelShuffle(groups int64, del bool)(retVal *Tensor, err erro lib.AtgChannelShuffle(ptr, ts.ctensor, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("ChannelShuffle() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ChannelShuffle") return retVal, err } @@ -11884,9 +12557,10 @@ func(ts *Tensor) ChannelShuffleOut(out *Tensor, groups int64, del bool)(retVal * lib.AtgChannelShuffleOut(ptr, out.ctensor, ts.ctensor, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("ChannelShuffleOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ChannelShuffleOut") return retVal, err } @@ -11902,9 +12576,10 @@ func(ts *Tensor) Cholesky(upper bool, del bool)(retVal *Tensor, err error) { if upper { cupper = int32(1) } lib.AtgCholesky(ptr, ts.ctensor, cupper) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cholesky() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cholesky") return retVal, err } @@ -11920,9 +12595,10 @@ func(ts *Tensor) CholeskyInverse(upper bool, del bool)(retVal *Tensor, err error if upper { cupper = int32(1) } lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper) if err = TorchErr(); err != nil { + err = fmt.Errorf("CholeskyInverse() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CholeskyInverse") return retVal, err } @@ -11938,9 +12614,10 @@ func(ts *Tensor) CholeskyInverseOut(out *Tensor, upper bool, del bool)(retVal *T if upper { cupper = int32(1) } lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper) if err = TorchErr(); err != nil { + err = fmt.Errorf("CholeskyInverseOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CholeskyInverseOut") return retVal, err } @@ -11956,9 +12633,10 @@ func(ts *Tensor) CholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor, if upper { cupper = int32(1) } lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper) if err = TorchErr(); err != 
nil { + err = fmt.Errorf("CholeskyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CholeskyOut") return retVal, err } @@ -11974,9 +12652,10 @@ func(ts *Tensor) CholeskySolve(input2 *Tensor, upper bool, del bool)(retVal *Ten if upper { cupper = int32(1) } lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper) if err = TorchErr(); err != nil { + err = fmt.Errorf("CholeskySolve() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CholeskySolve") return retVal, err } @@ -11992,9 +12671,10 @@ func(ts *Tensor) CholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del b if upper { cupper = int32(1) } lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper) if err = TorchErr(); err != nil { + err = fmt.Errorf("CholeskySolveOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CholeskySolveOut") return retVal, err } @@ -12007,10 +12687,11 @@ func ChooseQparamsOptimized(input *Tensor, numel int64, nBins int64, ratio float lib.AtgChooseQparamsOptimized(ctensorPtr0, input.ctensor, numel, nBins, ratio, bitWidth) if err = TorchErr(); err != nil { + err = fmt.Errorf("ChooseQparamsOptimized() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "ChooseQparamsOptimized_0") + retVal1 = newTensor(*ctensorPtr1, "ChooseQparamsOptimized_1") return retVal0, retVal1, err } @@ -12024,9 +12705,10 @@ func(ts *Tensor) Clamp(min *Scalar, max *Scalar, del bool)(retVal *Tensor, err e lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Clamp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Clamp") return retVal, err } @@ -12039,6 +12721,7 @@ func(ts *Tensor) Clamp_(min *Scalar, max *Scalar)(err error) { lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Clamp_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12055,9 +12738,10 @@ func(ts *Tensor) ClampMax(max *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgClampMax(ptr, ts.ctensor, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMax") return retVal, err } @@ -12070,6 +12754,7 @@ func(ts *Tensor) ClampMax_(max *Scalar)(err error) { lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMax_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12086,9 +12771,10 @@ func(ts *Tensor) ClampMaxOut(out *Tensor, max *Scalar, del bool)(retVal *Tensor, lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMaxOut") return retVal, err } @@ -12102,9 +12788,10 @@ func(ts *Tensor) ClampMaxTensor(max *Tensor, del bool)(retVal *Tensor, err error lib.AtgClampMaxTensor(ptr, ts.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMaxTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMaxTensor") return retVal, err } @@ -12117,6 +12804,7 @@ func(ts 
*Tensor) ClampMaxTensor_(max *Tensor)(err error) { lib.AtgClampMaxTensor_(ptr, ts.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMaxTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12133,9 +12821,10 @@ func(ts *Tensor) ClampMaxTensorOut(out *Tensor, max *Tensor, del bool)(retVal *T lib.AtgClampMaxTensorOut(ptr, out.ctensor, ts.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMaxTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMaxTensorOut") return retVal, err } @@ -12149,9 +12838,10 @@ func(ts *Tensor) ClampMin(min *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgClampMin(ptr, ts.ctensor, min.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMin() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMin") return retVal, err } @@ -12164,6 +12854,7 @@ func(ts *Tensor) ClampMin_(min *Scalar)(err error) { lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMin_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12180,9 +12871,10 @@ func(ts *Tensor) ClampMinOut(out *Tensor, min *Scalar, del bool)(retVal *Tensor, lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMinOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMinOut") return retVal, err } @@ -12196,9 +12888,10 @@ func(ts *Tensor) ClampMinTensor(min *Tensor, del bool)(retVal *Tensor, err error lib.AtgClampMinTensor(ptr, ts.ctensor, min.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMinTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMinTensor") return retVal, err } @@ -12211,6 +12904,7 @@ func(ts *Tensor) ClampMinTensor_(min *Tensor)(err error) { lib.AtgClampMinTensor_(ptr, ts.ctensor, min.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMinTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12227,9 +12921,10 @@ func(ts *Tensor) ClampMinTensorOut(out *Tensor, min *Tensor, del bool)(retVal *T lib.AtgClampMinTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampMinTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampMinTensorOut") return retVal, err } @@ -12243,9 +12938,10 @@ func(ts *Tensor) ClampOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVa lib.AtgClampOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampOut") return retVal, err } @@ -12259,9 +12955,10 @@ func(ts *Tensor) ClampTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor, lib.AtgClampTensor(ptr, ts.ctensor, min.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampTensor") return retVal, err } @@ -12274,6 +12971,7 @@ func(ts *Tensor) ClampTensor_(min *Tensor, max *Tensor)(err error) { lib.AtgClampTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("ClampTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12290,9 +12988,10 @@ func(ts *Tensor) ClampTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool) lib.AtgClampTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClampTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClampTensorOut") return retVal, err } @@ -12306,9 +13005,10 @@ func(ts *Tensor) Clip(min *Scalar, max *Scalar, del bool)(retVal *Tensor, err er lib.AtgClip(ptr, ts.ctensor, min.cscalar, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Clip() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Clip") return retVal, err } @@ -12321,6 +13021,7 @@ func(ts *Tensor) Clip_(min *Scalar, max *Scalar)(err error) { lib.AtgClip_(ptr, ts.ctensor, min.cscalar, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Clip_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12337,9 +13038,10 @@ func(ts *Tensor) ClipOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal lib.AtgClipOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClipOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClipOut") return retVal, err } @@ -12353,9 +13055,10 @@ func(ts *Tensor) ClipTensor(min *Tensor, max *Tensor, del bool)(retVal *Tensor, lib.AtgClipTensor(ptr, ts.ctensor, min.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClipTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClipTensor") return retVal, err } @@ -12368,6 +13071,7 @@ func(ts *Tensor) ClipTensor_(min *Tensor, max *Tensor)(err error) { lib.AtgClipTensor_(ptr, ts.ctensor, min.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClipTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12384,9 +13088,10 @@ func(ts *Tensor) ClipTensorOut(out *Tensor, min *Tensor, max *Tensor, del bool)( lib.AtgClipTensorOut(ptr, out.ctensor, ts.ctensor, min.ctensor, max.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ClipTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ClipTensorOut") return retVal, err } @@ -12400,9 +13105,10 @@ func(ts *Tensor) Clone(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgClone(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Clone() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Clone") return retVal, err } @@ -12416,9 +13122,10 @@ func(ts *Tensor) Coalesce(del bool)(retVal *Tensor, err error) { lib.AtgCoalesce(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Coalesce() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Coalesce") return retVal, err } @@ -12437,9 +13144,10 @@ paddingLen := len(padding) strideLen := len(stride) lib.AtgCol2im(ptr, ts.ctensor, outputSize, outputSizeLen, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Col2im() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Col2im") return retVal, err } @@ 
-12458,9 +13166,10 @@ paddingLen := len(padding) strideLen := len(stride) lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Col2imOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Col2imOut") return retVal, err } @@ -12474,9 +13183,10 @@ func(ts *Tensor) ColIndices(del bool)(retVal *Tensor, err error) { lib.AtgColIndices(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ColIndices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ColIndices") return retVal, err } @@ -12490,9 +13200,10 @@ func(ts *Tensor) ColIndicesCopy(del bool)(retVal *Tensor, err error) { lib.AtgColIndicesCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ColIndicesCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ColIndicesCopy") return retVal, err } @@ -12506,9 +13217,10 @@ func(ts *Tensor) ColIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err er lib.AtgColIndicesCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ColIndicesCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ColIndicesCopyOut") return retVal, err } @@ -12523,9 +13235,10 @@ func ColumnStack(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgColumnStack(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("ColumnStack() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ColumnStack") return retVal, err } @@ -12540,9 +13253,10 @@ func ColumnStackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgColumnStackOut(ptr, out.ctensor, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("ColumnStackOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ColumnStackOut") return retVal, err } @@ -12558,9 +13272,10 @@ func(ts *Tensor) Combinations(r int64, withReplacement bool, del bool)(retVal *T if withReplacement { cwithReplacement = int32(1) } lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement) if err = TorchErr(); err != nil { + err = fmt.Errorf("Combinations() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Combinations") return retVal, err } @@ -12573,9 +13288,10 @@ func Complex(real *Tensor, imag *Tensor)(retVal *Tensor, err error) { lib.AtgComplex(ptr, real.ctensor, imag.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Complex() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Complex") return retVal, err } @@ -12588,9 +13304,10 @@ func ComplexOut(out *Tensor, real *Tensor, imag *Tensor)(retVal *Tensor, err err lib.AtgComplexOut(ptr, out.ctensor, real.ctensor, imag.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ComplexOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ComplexOut") return retVal, err } @@ -12605,9 +13322,10 @@ func Concat(tensors []*Tensor, dim int64)(retVal 
*Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgConcat(ptr, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Concat() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Concat") return retVal, err } @@ -12622,9 +13340,10 @@ func ConcatOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err er for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgConcatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConcatOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConcatOut") return retVal, err } @@ -12639,9 +13358,10 @@ func Concatenate(tensors []*Tensor, dim int64)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgConcatenate(ptr, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Concatenate() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Concatenate") return retVal, err } @@ -12656,9 +13376,10 @@ func ConcatenateOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, e for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgConcatenateOut(ptr, out.ctensor, ctensors, len(ctensors), dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConcatenateOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConcatenateOut") return retVal, err } @@ -12672,9 +13393,10 @@ func(ts *Tensor) Conj(del bool)(retVal *Tensor, err error) { lib.AtgConj(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Conj() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Conj") return retVal, err } @@ -12688,9 +13410,10 @@ func(ts *Tensor) ConjPhysical(del bool)(retVal *Tensor, err error) { lib.AtgConjPhysical(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConjPhysical() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConjPhysical") return retVal, err } @@ -12703,6 +13426,7 @@ func(ts *Tensor) ConjPhysical_()(err error) { lib.AtgConjPhysical_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConjPhysical_() failed: %w", err) return err } ts.ctensor = *ptr @@ -12719,9 +13443,10 @@ func(ts *Tensor) ConjPhysicalOut(out *Tensor, del bool)(retVal *Tensor, err erro lib.AtgConjPhysicalOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConjPhysicalOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConjPhysicalOut") return retVal, err } @@ -12736,9 +13461,10 @@ func(ts *Tensor) ConstantPadNd(pad []int64, del bool)(retVal *Tensor, err error) padLen := len(pad) lib.AtgConstantPadNd(ptr, ts.ctensor, pad, padLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConstantPadNd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConstantPadNd") return retVal, err } @@ -12753,9 +13479,10 @@ func(ts *Tensor) ConstantPadNdOut(out *Tensor, pad []int64, del bool)(retVal *Te padLen := len(pad) lib.AtgConstantPadNdOut(ptr, out.ctensor, ts.ctensor, pad, padLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConstantPadNdOut() failed: %w", 
err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConstantPadNdOut") return retVal, err } @@ -12769,9 +13496,10 @@ func(ts *Tensor) Contiguous(del bool)(retVal *Tensor, err error) { lib.AtgContiguous(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Contiguous() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Contiguous") return retVal, err } @@ -12787,9 +13515,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("Conv1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Conv1d") return retVal, err } @@ -12804,9 +13533,10 @@ func Conv1dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, dilationLen := len(dilation) lib.AtgConv1dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("Conv1dPadding() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Conv1dPadding") return retVal, err } @@ -12822,9 +13552,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("Conv2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Conv2d") return retVal, err } @@ -12839,9 +13570,10 @@ func Conv2dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, dilationLen := len(dilation) lib.AtgConv2dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("Conv2dPadding() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Conv2dPadding") return retVal, err } @@ -12857,9 +13589,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("Conv3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Conv3d") return retVal, err } @@ -12874,9 +13607,10 @@ func Conv3dPadding(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, dilationLen := len(dilation) lib.AtgConv3dPadding(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("Conv3dPadding() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Conv3dPadding") return retVal, err } @@ -12894,9 +13628,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgConvDepthwise3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvDepthwise3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvDepthwise3d") return retVal, err 
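
Note: the hunks above all instantiate the same two generated changes. Every TorchErr() check now wraps the raw error as fmt.Errorf("<FuncName>() failed: %w", err), and every out-of-place wrapper builds its result through newTensor(*ptr, "<FuncName>") instead of the bare &Tensor{ctensor: *ptr} literal, so the returned value takes part in the GC collection this commit introduces (multi-result wrappers suffix the name, e.g. "ConvTbcBackward_0"). A self-contained sketch of the finalizer pattern such a constructor typically relies on follows; the ctensor type, freeCTensor helper, and field names are illustrative stand-ins, not gotch's actual internals:

    package main

    import (
        "fmt"
        "runtime"
    )

    // ctensor stands in for the C-allocated tensor handle that cgo returns.
    type ctensor uintptr

    // Tensor mirrors the generated wrapper type: the C handle plus the name
    // of the API call that created it, which makes leaks attributable.
    type Tensor struct {
        ctensor ctensor
        name    string
    }

    // newTensor records the creating function and registers a finalizer so
    // the C-side memory is released once the Go value becomes unreachable.
    func newTensor(ct ctensor, name string) *Tensor {
        t := &Tensor{ctensor: ct, name: name}
        runtime.SetFinalizer(t, func(t *Tensor) {
            freeCTensor(t.ctensor) // in gotch this would call into libtch
        })
        return t
    }

    func freeCTensor(ct ctensor) { fmt.Printf("freed tensor %#x\n", uintptr(ct)) }

    func main() {
        _ = newTensor(0xdeadbeef, "Clamp")
        runtime.GC() // finalizers run at the collector's discretion
    }

In-place methods (the trailing-underscore wrappers such as ClampMax_) keep assigning ts.ctensor = *ptr and return only err, so no new wrapper value is created for them.
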
} @@ -12914,9 +13649,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgConvDepthwise3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvDepthwise3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvDepthwise3dOut") return retVal, err } @@ -12930,9 +13666,10 @@ func(ts *Tensor) ConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool)(retV lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvTbc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvTbc") return retVal, err } @@ -12947,11 +13684,12 @@ func(ts *Tensor) ConvTbcBackward(input *Tensor, weight *Tensor, bias *Tensor, pa lib.AtgConvTbcBackward(ctensorPtr0, ts.ctensor, input.ctensor, weight.ctensor, bias.ctensor, pad) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvTbcBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "ConvTbcBackward_0") + retVal1 = newTensor(*ctensorPtr1, "ConvTbcBackward_1") + retVal2 = newTensor(*ctensorPtr2, "ConvTbcBackward_2") return retVal0, retVal1, retVal2, err } @@ -12965,9 +13703,10 @@ func(ts *Tensor) ConvTbcOut(out *Tensor, weight *Tensor, bias *Tensor, pad int64 lib.AtgConvTbcOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, pad) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvTbcOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvTbcOut") return retVal, err } @@ -12984,9 +13723,10 @@ outputPaddingLen := len(outputPadding) dilationLen := len(dilation) lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, groups, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvTranspose1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvTranspose1d") return retVal, err } @@ -13003,9 +13743,10 @@ outputPaddingLen := len(outputPadding) dilationLen := len(dilation) lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, groups, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvTranspose2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvTranspose2d") return retVal, err } @@ -13022,9 +13763,10 @@ outputPaddingLen := len(outputPadding) dilationLen := len(dilation) lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, groups, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvTranspose3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvTranspose3d") return retVal, err } @@ -13043,9 +13785,10 @@ ctransposed := int32(0) outputPaddingLen := len(outputPadding) lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, 
dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("Convolution() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Convolution") return retVal, err } @@ -13064,9 +13807,10 @@ ctransposed := int32(0) outputPaddingLen := len(outputPadding) lib.AtgConvolutionOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvolutionOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvolutionOut") return retVal, err } @@ -13085,9 +13829,10 @@ ctransposed := int32(0) outputPaddingLen := len(outputPadding) lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvolutionOverrideable() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvolutionOverrideable") return retVal, err } @@ -13106,9 +13851,10 @@ ctransposed := int32(0) outputPaddingLen := len(outputPadding) lib.AtgConvolutionOverrideableOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, ctransposed, outputPadding, outputPaddingLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("ConvolutionOverrideableOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ConvolutionOverrideableOut") return retVal, err } @@ -13124,9 +13870,10 @@ func(ts *Tensor) Copy(src *Tensor, nonBlocking bool, del bool)(retVal *Tensor, e if nonBlocking { cnonBlocking = int32(1) } lib.AtgCopy(ptr, ts.ctensor, src.ctensor, cnonBlocking) if err = TorchErr(); err != nil { + err = fmt.Errorf("Copy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Copy") return retVal, err } @@ -13142,9 +13889,10 @@ func(ts *Tensor) CopyOut(out *Tensor, src *Tensor, nonBlocking bool, del bool)(r if nonBlocking { cnonBlocking = int32(1) } lib.AtgCopyOut(ptr, out.ctensor, ts.ctensor, src.ctensor, cnonBlocking) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CopyOut") return retVal, err } @@ -13160,9 +13908,10 @@ func(ts *Tensor) CopySparseToSparse(src *Tensor, nonBlocking bool, del bool)(ret if nonBlocking { cnonBlocking = int32(1) } lib.AtgCopySparseToSparse(ptr, ts.ctensor, src.ctensor, cnonBlocking) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopySparseToSparse() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CopySparseToSparse") return retVal, err } @@ -13177,6 +13926,7 @@ func(ts *Tensor) CopySparseToSparse_(src *Tensor, nonBlocking bool)(err error) { if nonBlocking { cnonBlocking = int32(1) } lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopySparseToSparse_() failed: %w", err) return err } ts.ctensor = *ptr @@ -13195,9 +13945,10 @@ func(ts *Tensor) CopySparseToSparseOut(out *Tensor, src *Tensor, nonBlocking boo if nonBlocking { 
cnonBlocking = int32(1) } lib.AtgCopySparseToSparseOut(ptr, out.ctensor, ts.ctensor, src.ctensor, cnonBlocking) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopySparseToSparseOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CopySparseToSparseOut") return retVal, err } @@ -13211,9 +13962,10 @@ func(ts *Tensor) Copysign(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgCopysign(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Copysign() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Copysign") return retVal, err } @@ -13226,6 +13978,7 @@ func(ts *Tensor) Copysign_(other *Tensor)(err error) { lib.AtgCopysign_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Copysign_() failed: %w", err) return err } ts.ctensor = *ptr @@ -13242,9 +13995,10 @@ func(ts *Tensor) CopysignOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso lib.AtgCopysignOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopysignOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CopysignOut") return retVal, err } @@ -13258,9 +14012,10 @@ func(ts *Tensor) CopysignScalar(other *Scalar, del bool)(retVal *Tensor, err err lib.AtgCopysignScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopysignScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CopysignScalar") return retVal, err } @@ -13273,6 +14028,7 @@ func(ts *Tensor) CopysignScalar_(other *Scalar)(err error) { lib.AtgCopysignScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopysignScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -13289,9 +14045,10 @@ func(ts *Tensor) CopysignScalarOut(out *Tensor, other *Scalar, del bool)(retVal lib.AtgCopysignScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("CopysignScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CopysignScalarOut") return retVal, err } @@ -13305,9 +14062,10 @@ func(ts *Tensor) Corrcoef(del bool)(retVal *Tensor, err error) { lib.AtgCorrcoef(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Corrcoef() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Corrcoef") return retVal, err } @@ -13321,9 +14079,10 @@ func(ts *Tensor) Cos(del bool)(retVal *Tensor, err error) { lib.AtgCos(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cos() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cos") return retVal, err } @@ -13336,6 +14095,7 @@ func(ts *Tensor) Cos_()(err error) { lib.AtgCos_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cos_() failed: %w", err) return err } ts.ctensor = *ptr @@ -13352,9 +14112,10 @@ func(ts *Tensor) CosOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgCosOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CosOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CosOut") return retVal, err } @@ -13368,9 +14129,10 @@ func(ts *Tensor) Cosh(del bool)(retVal 
*Tensor, err error) { lib.AtgCosh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cosh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cosh") return retVal, err } @@ -13383,6 +14145,7 @@ func(ts *Tensor) Cosh_()(err error) { lib.AtgCosh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cosh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -13399,9 +14162,10 @@ func(ts *Tensor) CoshOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CoshOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CoshOut") return retVal, err } @@ -13414,9 +14178,10 @@ func CosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("CosineEmbeddingLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CosineEmbeddingLoss") return retVal, err } @@ -13429,9 +14194,10 @@ func CosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64)(retVal *Te lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("CosineSimilarity() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CosineSimilarity") return retVal, err } @@ -13451,9 +14217,10 @@ func(ts *Tensor) CountNonzero(dim []int64, del bool)(retVal *Tensor, err error) } lib.AtgCountNonzero(ptr, ts.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("CountNonzero() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CountNonzero") return retVal, err } @@ -13468,9 +14235,10 @@ func(ts *Tensor) CountNonzeroDimIntlist(dim []int64, del bool)(retVal *Tensor, e dimLen := len(dim) lib.AtgCountNonzeroDimIntlist(ptr, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("CountNonzeroDimIntlist() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CountNonzeroDimIntlist") return retVal, err } @@ -13485,9 +14253,10 @@ func(ts *Tensor) CountNonzeroDimIntlistOut(out *Tensor, dim []int64, del bool)(r dimLen := len(dim) lib.AtgCountNonzeroDimIntlistOut(ptr, out.ctensor, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("CountNonzeroDimIntlistOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CountNonzeroDimIntlistOut") return retVal, err } @@ -13507,9 +14276,10 @@ func(ts *Tensor) CountNonzeroOut(out *Tensor, dim []int64, del bool)(retVal *Ten } lib.AtgCountNonzeroOut(ptr, out.ctensor, ts.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("CountNonzeroOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CountNonzeroOut") return retVal, err } @@ -13523,9 +14293,10 @@ func(ts *Tensor) Cov(correction int64, fweights *Tensor, aweights *Tensor, del b lib.AtgCov(ptr, ts.ctensor, correction, fweights.ctensor, aweights.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cov() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cov") return 
retVal, err } @@ -13545,9 +14316,10 @@ func(ts *Tensor) Cross(other *Tensor, dim []int64, del bool)(retVal *Tensor, err } lib.AtgCross(ptr, ts.ctensor, other.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cross() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cross") return retVal, err } @@ -13561,9 +14333,10 @@ func(ts *Tensor) CrossEntropyLoss(target *Tensor, weight *Tensor, reduction int6 lib.AtgCrossEntropyLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, labelSmoothing) if err = TorchErr(); err != nil { + err = fmt.Errorf("CrossEntropyLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CrossEntropyLoss") return retVal, err } @@ -13583,9 +14356,10 @@ func(ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim []int64, del bool)(ret } lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("CrossOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CrossOut") return retVal, err } @@ -13599,9 +14373,10 @@ func(ts *Tensor) CrowIndices(del bool)(retVal *Tensor, err error) { lib.AtgCrowIndices(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CrowIndices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CrowIndices") return retVal, err } @@ -13615,9 +14390,10 @@ func(ts *Tensor) CrowIndicesCopy(del bool)(retVal *Tensor, err error) { lib.AtgCrowIndicesCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CrowIndicesCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CrowIndicesCopy") return retVal, err } @@ -13631,9 +14407,10 @@ func(ts *Tensor) CrowIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err e lib.AtgCrowIndicesCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CrowIndicesCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CrowIndicesCopyOut") return retVal, err } @@ -13650,9 +14427,10 @@ czeroInfinity := int32(0) if zeroInfinity { czeroInfinity = int32(1) } lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, inputLengthsLen, targetLengths, targetLengthsLen, blank, reduction, czeroInfinity) if err = TorchErr(); err != nil { + err = fmt.Errorf("CtcLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CtcLoss") return retVal, err } @@ -13667,9 +14445,10 @@ func CtcLossTensor(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targ if zeroInfinity { czeroInfinity = int32(1) } lib.AtgCtcLossTensor(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity) if err = TorchErr(); err != nil { + err = fmt.Errorf("CtcLossTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CtcLossTensor") return retVal, err } @@ -13682,9 +14461,10 @@ func CudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64) lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnAffineGridGenerator() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"CudnnAffineGridGenerator") return retVal, err } @@ -13697,9 +14477,10 @@ func CudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnAffineGridGeneratorBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnAffineGridGeneratorBackward") return retVal, err } @@ -13712,9 +14493,10 @@ func CudnnAffineGridGeneratorBackwardOut(out *Tensor, grad *Tensor, n int64, c i lib.AtgCudnnAffineGridGeneratorBackwardOut(ptr, out.ctensor, grad.ctensor, n, c, h, w) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnAffineGridGeneratorBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnAffineGridGeneratorBackwardOut") return retVal, err } @@ -13727,9 +14509,10 @@ func CudnnAffineGridGeneratorOut(out *Tensor, theta *Tensor, n int64, c int64, h lib.AtgCudnnAffineGridGeneratorOut(ptr, out.ctensor, theta.ctensor, n, c, h, w) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnAffineGridGeneratorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnAffineGridGeneratorOut") return retVal, err } @@ -13746,12 +14529,13 @@ func CudnnBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Te if training { ctraining = int32(1) } lib.AtgCudnnBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnBatchNorm() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "CudnnBatchNorm_0") + retVal1 = newTensor(*ctensorPtr1, "CudnnBatchNorm_1") + retVal2 = newTensor(*ctensorPtr2, "CudnnBatchNorm_2") + retVal3 = newTensor(*ctensorPtr3, "CudnnBatchNorm_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -13765,11 +14549,12 @@ func CudnnBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor, r lib.AtgCudnnBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon, reserveSpace.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnBatchNormBackward() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "CudnnBatchNormBackward_0") + retVal1 = newTensor(*ctensorPtr1, "CudnnBatchNormBackward_1") + retVal2 = newTensor(*ctensorPtr2, "CudnnBatchNormBackward_2") return retVal0, retVal1, retVal2, err } @@ -13783,11 +14568,12 @@ func CudnnBatchNormBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input * lib.AtgCudnnBatchNormBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon, reserveSpace.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnBatchNormBackwardOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = 
&Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "CudnnBatchNormBackwardOut_0") + retVal1 = newTensor(*ctensorPtr1, "CudnnBatchNormBackwardOut_1") + retVal2 = newTensor(*ctensorPtr2, "CudnnBatchNormBackwardOut_2") return retVal0, retVal1, retVal2, err } @@ -13804,12 +14590,13 @@ func CudnnBatchNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, out3 *Tensor, i if training { ctraining = int32(1) } lib.AtgCudnnBatchNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnBatchNormOut() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "CudnnBatchNormOut_0") + retVal1 = newTensor(*ctensorPtr1, "CudnnBatchNormOut_1") + retVal2 = newTensor(*ctensorPtr2, "CudnnBatchNormOut_2") + retVal3 = newTensor(*ctensorPtr3, "CudnnBatchNormOut_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -13832,9 +14619,10 @@ callowTf32 := int32(0) if allowTf32 { callowTf32 = int32(1) } lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnConvolution() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolution") return retVal, err } @@ -13851,9 +14639,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgCudnnConvolutionAddRelu(ptr, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnConvolutionAddRelu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolutionAddRelu") return retVal, err } @@ -13870,9 +14659,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgCudnnConvolutionAddReluOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnConvolutionAddReluOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolutionAddReluOut") return retVal, err } @@ -13895,9 +14685,10 @@ callowTf32 := int32(0) if allowTf32 { callowTf32 = int32(1) } lib.AtgCudnnConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnConvolutionOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolutionOut") return retVal, err } @@ -13914,9 +14705,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgCudnnConvolutionRelu(ptr, ts.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("CudnnConvolutionRelu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolutionRelu") return retVal, err } @@ -13933,9 +14725,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgCudnnConvolutionReluOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnConvolutionReluOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolutionReluOut") return retVal, err } @@ -13959,9 +14752,10 @@ callowTf32 := int32(0) if allowTf32 { callowTf32 = int32(1) } lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnConvolutionTranspose() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolutionTranspose") return retVal, err } @@ -13985,9 +14779,10 @@ callowTf32 := int32(0) if allowTf32 { callowTf32 = int32(1) } lib.AtgCudnnConvolutionTransposeOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic, callowTf32) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnConvolutionTransposeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnConvolutionTransposeOut") return retVal, err } @@ -14001,9 +14796,10 @@ func(ts *Tensor) CudnnGridSampler(grid *Tensor, del bool)(retVal *Tensor, err er lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnGridSampler() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnGridSampler") return retVal, err } @@ -14017,10 +14813,11 @@ func(ts *Tensor) CudnnGridSamplerBackward(grid *Tensor, gradOutput *Tensor, del lib.AtgCudnnGridSamplerBackward(ctensorPtr0, ts.ctensor, grid.ctensor, gradOutput.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnGridSamplerBackward() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "CudnnGridSamplerBackward_0") + retVal1 = newTensor(*ctensorPtr1, "CudnnGridSamplerBackward_1") return retVal0, retVal1, err } @@ -14034,10 +14831,11 @@ func(ts *Tensor) CudnnGridSamplerBackwardOut(out0 *Tensor, out1 *Tensor, grid *T lib.AtgCudnnGridSamplerBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, grid.ctensor, gradOutput.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnGridSamplerBackwardOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "CudnnGridSamplerBackwardOut_0") + retVal1 = newTensor(*ctensorPtr1, "CudnnGridSamplerBackwardOut_1") return retVal0, retVal1, err } @@ -14051,9 +14849,10 @@ func(ts *Tensor) CudnnGridSamplerOut(out *Tensor, grid *Tensor, del bool)(retVal lib.AtgCudnnGridSamplerOut(ptr, out.ctensor, ts.ctensor, grid.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnGridSamplerOut() 
failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CudnnGridSamplerOut") return retVal, err } @@ -14065,6 +14864,7 @@ func(ts *Tensor) CudnnIsAcceptable(del bool)(retVal bool, err error) { retVal = lib.AtgCudnnIsAcceptable(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CudnnIsAcceptable() failed: %w", err) return retVal, err } return retVal, err @@ -14079,10 +14879,11 @@ func(ts *Tensor) Cummax(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, e lib.AtgCummax(ctensorPtr0, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cummax() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Cummax_0") + retVal1 = newTensor(*ctensorPtr1, "Cummax_1") return retVal0, retVal1, err } @@ -14096,10 +14897,11 @@ func(ts *Tensor) CummaxOut(values *Tensor, indices *Tensor, dim int64, del bool) lib.AtgCummaxOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("CummaxOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "CummaxOut_0") + retVal1 = newTensor(*ctensorPtr1, "CummaxOut_1") return retVal0, retVal1, err } @@ -14112,9 +14914,10 @@ func CummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64)( lib.AtgCummaxminBackward(ptr, grad.ctensor, input.ctensor, indices.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("CummaxminBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CummaxminBackward") return retVal, err } @@ -14128,10 +14931,11 @@ func(ts *Tensor) Cummin(dim int64, del bool)(retVal0 *Tensor, retVal1 *Tensor, e lib.AtgCummin(ctensorPtr0, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cummin() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Cummin_0") + retVal1 = newTensor(*ctensorPtr1, "Cummin_1") return retVal0, retVal1, err } @@ -14145,10 +14949,11 @@ func(ts *Tensor) CumminOut(values *Tensor, indices *Tensor, dim int64, del bool) lib.AtgCumminOut(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("CumminOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "CumminOut_0") + retVal1 = newTensor(*ctensorPtr1, "CumminOut_1") return retVal0, retVal1, err } @@ -14162,9 +14967,10 @@ func(ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cumprod() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cumprod") return retVal, err } @@ -14177,6 +14983,7 @@ func(ts *Tensor) Cumprod_(dim int64, dtype gotch.DType)(err error) { lib.AtgCumprod_(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cumprod_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14192,9 +14999,10 @@ func CumprodBackward(grad *Tensor, input *Tensor, dim int64, output *Tensor)(ret 
lib.AtgCumprodBackward(ptr, grad.ctensor, input.ctensor, dim, output.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("CumprodBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CumprodBackward") return retVal, err } @@ -14208,9 +15016,10 @@ func(ts *Tensor) CumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) lib.AtgCumprodOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("CumprodOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CumprodOut") return retVal, err } @@ -14224,9 +15033,10 @@ func(ts *Tensor) Cumsum(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cumsum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Cumsum") return retVal, err } @@ -14239,6 +15049,7 @@ func(ts *Tensor) Cumsum_(dim int64, dtype gotch.DType)(err error) { lib.AtgCumsum_(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Cumsum_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14255,9 +15066,10 @@ func(ts *Tensor) CumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool)( lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("CumsumOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CumsumOut") return retVal, err } @@ -14270,9 +15082,10 @@ func CumulativeTrapezoid(y *Tensor, dim int64)(retVal *Tensor, err error) { lib.AtgCumulativeTrapezoid(ptr, y.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("CumulativeTrapezoid() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CumulativeTrapezoid") return retVal, err } @@ -14285,9 +15098,10 @@ func CumulativeTrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err e lib.AtgCumulativeTrapezoidX(ptr, y.ctensor, x.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("CumulativeTrapezoidX() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "CumulativeTrapezoidX") return retVal, err } @@ -14301,9 +15115,10 @@ func(ts *Tensor) Data(del bool)(retVal *Tensor, err error) { lib.AtgData(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Data() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Data") return retVal, err } @@ -14317,9 +15132,10 @@ func(ts *Tensor) Deg2rad(del bool)(retVal *Tensor, err error) { lib.AtgDeg2rad(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Deg2rad() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Deg2rad") return retVal, err } @@ -14332,6 +15148,7 @@ func(ts *Tensor) Deg2rad_()(err error) { lib.AtgDeg2rad_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Deg2rad_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14348,9 +15165,10 @@ func(ts *Tensor) Deg2radOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgDeg2radOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Deg2radOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"Deg2radOut") return retVal, err } @@ -14362,6 +15180,7 @@ func(ts *Tensor) DenseDim(del bool)(retVal int64, err error) { retVal = lib.AtgDenseDim(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DenseDim() failed: %w", err) return retVal, err } return retVal, err @@ -14376,9 +15195,10 @@ func(ts *Tensor) Dequantize(del bool)(retVal *Tensor, err error) { lib.AtgDequantize(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Dequantize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Dequantize") return retVal, err } @@ -14392,9 +15212,10 @@ func(ts *Tensor) Det(del bool)(retVal *Tensor, err error) { lib.AtgDet(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Det() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Det") return retVal, err } @@ -14408,9 +15229,10 @@ func(ts *Tensor) Detach(del bool)(retVal *Tensor, err error) { lib.AtgDetach(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Detach() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Detach") return retVal, err } @@ -14423,6 +15245,7 @@ func(ts *Tensor) Detach_()(err error) { lib.AtgDetach_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Detach_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14439,9 +15262,10 @@ func(ts *Tensor) DetachCopy(del bool)(retVal *Tensor, err error) { lib.AtgDetachCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DetachCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DetachCopy") return retVal, err } @@ -14455,9 +15279,10 @@ func(ts *Tensor) DetachCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgDetachCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DetachCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DetachCopyOut") return retVal, err } @@ -14471,9 +15296,10 @@ func(ts *Tensor) Diag(diagonal int64, del bool)(retVal *Tensor, err error) { lib.AtgDiag(ptr, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("Diag() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Diag") return retVal, err } @@ -14487,9 +15313,10 @@ func(ts *Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVa lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagEmbed() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagEmbed") return retVal, err } @@ -14503,9 +15330,10 @@ func(ts *Tensor) DiagEmbedOut(out *Tensor, offset int64, dim1 int64, dim2 int64, lib.AtgDiagEmbedOut(ptr, out.ctensor, ts.ctensor, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagEmbedOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagEmbedOut") return retVal, err } @@ -14519,9 +15347,10 @@ func(ts *Tensor) DiagOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagOut") return retVal, err 
} @@ -14535,9 +15364,10 @@ func(ts *Tensor) Diagflat(offset int64, del bool)(retVal *Tensor, err error) { lib.AtgDiagflat(ptr, ts.ctensor, offset) if err = TorchErr(); err != nil { + err = fmt.Errorf("Diagflat() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Diagflat") return retVal, err } @@ -14551,9 +15381,10 @@ func(ts *Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("Diagonal() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Diagonal") return retVal, err } @@ -14567,9 +15398,10 @@ func DiagonalBackward(gradOutput *Tensor, inputSizes []int64, offset int64, dim1 inputSizesLen := len(inputSizes) lib.AtgDiagonalBackward(ptr, gradOutput.ctensor, inputSizes, inputSizesLen, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagonalBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagonalBackward") return retVal, err } @@ -14583,9 +15415,10 @@ func DiagonalBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, of inputSizesLen := len(inputSizes) lib.AtgDiagonalBackwardOut(ptr, out.ctensor, gradOutput.ctensor, inputSizes, inputSizesLen, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagonalBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagonalBackwardOut") return retVal, err } @@ -14599,9 +15432,10 @@ func(ts *Tensor) DiagonalCopy(offset int64, dim1 int64, dim2 int64, del bool)(re lib.AtgDiagonalCopy(ptr, ts.ctensor, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagonalCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagonalCopy") return retVal, err } @@ -14615,9 +15449,10 @@ func(ts *Tensor) DiagonalCopyOut(out *Tensor, offset int64, dim1 int64, dim2 int lib.AtgDiagonalCopyOut(ptr, out.ctensor, ts.ctensor, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagonalCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagonalCopyOut") return retVal, err } @@ -14631,9 +15466,10 @@ func(ts *Tensor) DiagonalScatter(src *Tensor, offset int64, dim1 int64, dim2 int lib.AtgDiagonalScatter(ptr, ts.ctensor, src.ctensor, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagonalScatter() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagonalScatter") return retVal, err } @@ -14647,9 +15483,10 @@ func(ts *Tensor) DiagonalScatterOut(out *Tensor, src *Tensor, offset int64, dim1 lib.AtgDiagonalScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, offset, dim1, dim2) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiagonalScatterOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiagonalScatterOut") return retVal, err } @@ -14663,9 +15500,10 @@ func(ts *Tensor) Diff(n int64, dim int64, prepend *Tensor, append *Tensor, del b lib.AtgDiff(ptr, ts.ctensor, n, dim, prepend.ctensor, append.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Diff() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Diff") return retVal, err } @@ 
-14679,9 +15517,10 @@ func(ts *Tensor) DiffOut(out *Tensor, n int64, dim int64, prepend *Tensor, appen lib.AtgDiffOut(ptr, out.ctensor, ts.ctensor, n, dim, prepend.ctensor, append.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DiffOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DiffOut") return retVal, err } @@ -14695,9 +15534,10 @@ func(ts *Tensor) Digamma(del bool)(retVal *Tensor, err error) { lib.AtgDigamma(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Digamma() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Digamma") return retVal, err } @@ -14710,6 +15550,7 @@ func(ts *Tensor) Digamma_()(err error) { lib.AtgDigamma_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Digamma_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14726,9 +15567,10 @@ func(ts *Tensor) DigammaOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DigammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DigammaOut") return retVal, err } @@ -14742,9 +15584,10 @@ func(ts *Tensor) Dist(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgDist(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Dist() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Dist") return retVal, err } @@ -14758,9 +15601,10 @@ func(ts *Tensor) DistOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, e lib.AtgDistOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DistOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DistOut") return retVal, err } @@ -14774,9 +15618,10 @@ func(ts *Tensor) Div(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgDiv(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Div() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Div") return retVal, err } @@ -14789,6 +15634,7 @@ func(ts *Tensor) Div_(other *Tensor)(err error) { lib.AtgDiv_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Div_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14805,9 +15651,10 @@ func(ts *Tensor) DivOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, er lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivOut") return retVal, err } @@ -14821,9 +15668,10 @@ func(ts *Tensor) DivOutMode(out *Tensor, other *Tensor, roundingMode string, del lib.AtgDivOutMode(ptr, out.ctensor, ts.ctensor, other.ctensor, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivOutMode() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivOutMode") return retVal, err } @@ -14837,9 +15685,10 @@ func(ts *Tensor) DivScalar(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgDivScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivScalar() failed: %w", err) return retVal, err } - retVal = 
&Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivScalar") return retVal, err } @@ -14852,6 +15701,7 @@ func(ts *Tensor) DivScalar_(other *Scalar)(err error) { lib.AtgDivScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14868,9 +15718,10 @@ func(ts *Tensor) DivScalarMode(other *Scalar, roundingMode string, del bool)(ret lib.AtgDivScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivScalarMode() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivScalarMode") return retVal, err } @@ -14883,6 +15734,7 @@ func(ts *Tensor) DivScalarMode_(other *Scalar, roundingMode string)(err error) { lib.AtgDivScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivScalarMode_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14899,9 +15751,10 @@ func(ts *Tensor) DivScalarModeOut(out *Tensor, other *Scalar, roundingMode strin lib.AtgDivScalarModeOut(ptr, out.ctensor, ts.ctensor, other.cscalar, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivScalarModeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivScalarModeOut") return retVal, err } @@ -14915,9 +15768,10 @@ func(ts *Tensor) DivScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tens lib.AtgDivScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivScalarOut") return retVal, err } @@ -14931,9 +15785,10 @@ func(ts *Tensor) DivTensorMode(other *Tensor, roundingMode string, del bool)(ret lib.AtgDivTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivTensorMode() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivTensorMode") return retVal, err } @@ -14946,6 +15801,7 @@ func(ts *Tensor) DivTensorMode_(other *Tensor, roundingMode string)(err error) { lib.AtgDivTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivTensorMode_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14962,9 +15818,10 @@ func(ts *Tensor) Divide(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgDivide(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Divide() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Divide") return retVal, err } @@ -14977,6 +15834,7 @@ func(ts *Tensor) Divide_(other *Tensor)(err error) { lib.AtgDivide_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Divide_() failed: %w", err) return err } ts.ctensor = *ptr @@ -14993,9 +15851,10 @@ func(ts *Tensor) DivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, lib.AtgDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivideOut") return retVal, err } @@ -15009,9 +15868,10 @@ func(ts *Tensor) DivideOutMode(out *Tensor, other *Tensor, roundingMode string, lib.AtgDivideOutMode(ptr, out.ctensor, 
ts.ctensor, other.ctensor, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideOutMode() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivideOutMode") return retVal, err } @@ -15025,9 +15885,10 @@ func(ts *Tensor) DivideScalar(other *Scalar, del bool)(retVal *Tensor, err error lib.AtgDivideScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivideScalar") return retVal, err } @@ -15040,6 +15901,7 @@ func(ts *Tensor) DivideScalar_(other *Scalar)(err error) { lib.AtgDivideScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15056,9 +15918,10 @@ func(ts *Tensor) DivideScalarMode(other *Scalar, roundingMode string, del bool)( lib.AtgDivideScalarMode(ptr, ts.ctensor, other.cscalar, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideScalarMode() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivideScalarMode") return retVal, err } @@ -15071,6 +15934,7 @@ func(ts *Tensor) DivideScalarMode_(other *Scalar, roundingMode string)(err error lib.AtgDivideScalarMode_(ptr, ts.ctensor, other.cscalar, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideScalarMode_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15087,9 +15951,10 @@ func(ts *Tensor) DivideTensorMode(other *Tensor, roundingMode string, del bool)( lib.AtgDivideTensorMode(ptr, ts.ctensor, other.ctensor, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideTensorMode() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DivideTensorMode") return retVal, err } @@ -15102,6 +15967,7 @@ func(ts *Tensor) DivideTensorMode_(other *Tensor, roundingMode string)(err error lib.AtgDivideTensorMode_(ptr, ts.ctensor, other.ctensor, roundingMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("DivideTensorMode_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15118,9 +15984,10 @@ func(ts *Tensor) Dot(tensor *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgDot(ptr, ts.ctensor, tensor.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Dot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Dot") return retVal, err } @@ -15134,9 +16001,10 @@ func(ts *Tensor) DotOut(out *Tensor, tensor *Tensor, del bool)(retVal *Tensor, e lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("DotOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DotOut") return retVal, err } @@ -15151,9 +16019,10 @@ func Dropout(input *Tensor, p float64, train bool)(retVal *Tensor, err error) { if train { ctrain = int32(1) } lib.AtgDropout(ptr, input.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("Dropout() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Dropout") return retVal, err } @@ -15168,6 +16037,7 @@ func(ts *Tensor) Dropout_(p float64, train bool)(err error) { if train { ctrain = int32(1) } lib.AtgDropout_(ptr, ts.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("Dropout_() failed: 
%w", err) return err } ts.ctensor = *ptr @@ -15185,9 +16055,10 @@ func Dstack(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgDstack(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("Dstack() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Dstack") return retVal, err } @@ -15202,9 +16073,10 @@ func DstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgDstackOut(ptr, out.ctensor, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("DstackOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "DstackOut") return retVal, err } @@ -15220,9 +16092,10 @@ func Einsum(equation string, tensors []*Tensor, path []int64)(retVal *Tensor, er pathLen := len(path) lib.AtgEinsum(ptr, equation, ctensors, len(ctensors), path, pathLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Einsum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Einsum") return retVal, err } @@ -15236,9 +16109,10 @@ func(ts *Tensor) Elu(del bool)(retVal *Tensor, err error) { lib.AtgElu(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Elu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Elu") return retVal, err } @@ -15251,6 +16125,7 @@ func(ts *Tensor) Elu_()(err error) { lib.AtgElu_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Elu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15268,9 +16143,10 @@ func EluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *S if isResult { cisResult = int32(1) } lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EluBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EluBackward") return retVal, err } @@ -15285,9 +16161,10 @@ func EluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, if isResult { cisResult = int32(1) } lib.AtgEluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, cisResult, selfOrResult.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EluBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EluBackwardGradInput") return retVal, err } @@ -15301,9 +16178,10 @@ func(ts *Tensor) EluOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgEluOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EluOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EluOut") return retVal, err } @@ -15320,9 +16198,10 @@ csparse := int32(0) if sparse { csparse = int32(1) } lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse) if err = TorchErr(); err != nil { + err = fmt.Errorf("Embedding() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Embedding") return retVal, err } @@ -15339,9 +16218,10 @@ csparse := int32(0) if sparse { csparse = int32(1) } 
lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmbeddingBackward") return retVal, err } @@ -15362,12 +16242,13 @@ cincludeLastOffset := int32(0) if includeLastOffset { cincludeLastOffset = int32(1) } lib.AtgEmbeddingBag(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingBag() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "EmbeddingBag_0") + retVal1 = newTensor(*ctensorPtr1, "EmbeddingBag_1") + retVal2 = newTensor(*ctensorPtr2, "EmbeddingBag_2") + retVal3 = newTensor(*ctensorPtr3, "EmbeddingBag_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -15394,12 +16275,13 @@ var cpaddingIdxVal int64 = 0 } lib.AtgEmbeddingBagPaddingIdx(ctensorPtr0, weight.ctensor, indices.ctensor, offsets.ctensor, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor, cincludeLastOffset, cpaddingIdxVal, cpaddingIdxNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingBagPaddingIdx() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "EmbeddingBagPaddingIdx_0") + retVal1 = newTensor(*ctensorPtr1, "EmbeddingBagPaddingIdx_1") + retVal2 = newTensor(*ctensorPtr2, "EmbeddingBagPaddingIdx_2") + retVal3 = newTensor(*ctensorPtr3, "EmbeddingBagPaddingIdx_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -15414,9 +16296,10 @@ func EmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int6 if scaleGradByFreq { cscaleGradByFreq = int32(1) } lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingDenseBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmbeddingDenseBackward") return retVal, err } @@ -15431,9 +16314,10 @@ func EmbeddingDenseBackwardOut(out *Tensor, gradOutput *Tensor, indices *Tensor, if scaleGradByFreq { cscaleGradByFreq = int32(1) } lib.AtgEmbeddingDenseBackwardOut(ptr, out.ctensor, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingDenseBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmbeddingDenseBackwardOut") return retVal, err } @@ -15450,9 +16334,10 @@ csparse := int32(0) if sparse { csparse = int32(1) } lib.AtgEmbeddingOut(ptr, out.ctensor, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmbeddingOut") return retVal, err } @@ -15466,9 +16351,10 @@ func(ts *Tensor) EmbeddingRenorm(indices *Tensor, maxNorm 
float64, normType floa lib.AtgEmbeddingRenorm(ptr, ts.ctensor, indices.ctensor, maxNorm, normType) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingRenorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmbeddingRenorm") return retVal, err } @@ -15481,6 +16367,7 @@ func(ts *Tensor) EmbeddingRenorm_(indices *Tensor, maxNorm float64, normType flo lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingRenorm_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15497,9 +16384,10 @@ func(ts *Tensor) EmbeddingRenormOut(out *Tensor, indices *Tensor, maxNorm float6 lib.AtgEmbeddingRenormOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, maxNorm, normType) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingRenormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmbeddingRenormOut") return retVal, err } @@ -15514,9 +16402,10 @@ func EmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, pa if scaleGradByFreq { cscaleGradByFreq = int32(1) } lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmbeddingSparseBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmbeddingSparseBackward") return retVal, err } @@ -15530,9 +16419,10 @@ func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(re sizeLen := len(size) lib.AtgEmpty(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Empty() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Empty") return retVal, err } @@ -15546,9 +16436,10 @@ func(ts *Tensor) EmptyLike(del bool)(retVal *Tensor, err error) { lib.AtgEmptyLike(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmptyLike() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmptyLike") return retVal, err } @@ -15562,9 +16453,10 @@ func(ts *Tensor) EmptyLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgEmptyLikeOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmptyLikeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmptyLikeOut") return retVal, err } @@ -15578,9 +16470,10 @@ func EmptyOut(out *Tensor, size []int64)(retVal *Tensor, err error) { sizeLen := len(size) lib.AtgEmptyOut(ptr, out.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmptyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmptyOut") return retVal, err } @@ -15594,9 +16487,10 @@ func EmptyQuantized(size []int64, qtensor *Tensor, optionsKind gotch.DType, opti sizeLen := len(size) lib.AtgEmptyQuantized(ptr, size, sizeLen, qtensor.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmptyQuantized() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmptyQuantized") return retVal, err } @@ -15610,9 +16504,10 @@ func EmptyQuantizedOut(out *Tensor, size []int64, qtensor *Tensor)(retVal *Tenso sizeLen := len(size) 
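// NOTE. Every error path in this file now wraps the libtorch error with the
// failing binding's name via the `%w` verb, which keeps the prefix
// transparent to errors.Is/errors.As. A hypothetical caller:
//
//	t, err := ts.Empty([]int64{2, 3}, gotch.Float, gotch.CPU)
//	if err != nil {
//		// err reads "Empty() failed: <libtorch message>"; the original
//		// error remains reachable through errors.Unwrap(err).
//		log.Fatal(err)
//	}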
lib.AtgEmptyQuantizedOut(ptr, out.ctensor, size, sizeLen, qtensor.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmptyQuantizedOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmptyQuantizedOut") return retVal, err } @@ -15627,9 +16522,10 @@ func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, options strideLen := len(stride) lib.AtgEmptyStrided(ptr, size, sizeLen, stride, strideLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmptyStrided() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmptyStrided") return retVal, err } @@ -15644,9 +16540,10 @@ func EmptyStridedOut(out *Tensor, size []int64, stride []int64)(retVal *Tensor, strideLen := len(stride) lib.AtgEmptyStridedOut(ptr, out.ctensor, size, sizeLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("EmptyStridedOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EmptyStridedOut") return retVal, err } @@ -15660,9 +16557,10 @@ func(ts *Tensor) Eq(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgEq(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Eq() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Eq") return retVal, err } @@ -15675,6 +16573,7 @@ func(ts *Tensor) Eq_(other *Scalar)(err error) { lib.AtgEq_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Eq_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15691,9 +16590,10 @@ func(ts *Tensor) EqScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tenso lib.AtgEqScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("EqScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EqScalarOut") return retVal, err } @@ -15707,9 +16607,10 @@ func(ts *Tensor) EqTensor(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgEqTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EqTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EqTensor") return retVal, err } @@ -15722,6 +16623,7 @@ func(ts *Tensor) EqTensor_(other *Tensor)(err error) { lib.AtgEqTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EqTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15738,9 +16640,10 @@ func(ts *Tensor) EqTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso lib.AtgEqTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("EqTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EqTensorOut") return retVal, err } @@ -15752,6 +16655,7 @@ func(ts *Tensor) Equal(other *Tensor, del bool)(retVal bool, err error) { retVal = lib.AtgEqual(ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Equal() failed: %w", err) return retVal, err } return retVal, err @@ -15766,9 +16670,10 @@ func(ts *Tensor) Erf(del bool)(retVal *Tensor, err error) { lib.AtgErf(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Erf() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + 
retVal = newTensor(*ptr, "Erf") return retVal, err } @@ -15781,6 +16686,7 @@ func(ts *Tensor) Erf_()(err error) { lib.AtgErf_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Erf_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15797,9 +16703,10 @@ func(ts *Tensor) ErfOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgErfOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ErfOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ErfOut") return retVal, err } @@ -15813,9 +16720,10 @@ func(ts *Tensor) Erfc(del bool)(retVal *Tensor, err error) { lib.AtgErfc(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Erfc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Erfc") return retVal, err } @@ -15828,6 +16736,7 @@ func(ts *Tensor) Erfc_()(err error) { lib.AtgErfc_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Erfc_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15844,9 +16753,10 @@ func(ts *Tensor) ErfcOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ErfcOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ErfcOut") return retVal, err } @@ -15860,9 +16770,10 @@ func(ts *Tensor) Erfinv(del bool)(retVal *Tensor, err error) { lib.AtgErfinv(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Erfinv() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Erfinv") return retVal, err } @@ -15875,6 +16786,7 @@ func(ts *Tensor) Erfinv_()(err error) { lib.AtgErfinv_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Erfinv_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15891,9 +16803,10 @@ func(ts *Tensor) ErfinvOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ErfinvOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ErfinvOut") return retVal, err } @@ -15907,9 +16820,10 @@ func(ts *Tensor) Exp(del bool)(retVal *Tensor, err error) { lib.AtgExp(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Exp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Exp") return retVal, err } @@ -15923,9 +16837,10 @@ func(ts *Tensor) Exp2(del bool)(retVal *Tensor, err error) { lib.AtgExp2(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Exp2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Exp2") return retVal, err } @@ -15938,6 +16853,7 @@ func(ts *Tensor) Exp2_()(err error) { lib.AtgExp2_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Exp2_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15954,9 +16870,10 @@ func(ts *Tensor) Exp2Out(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgExp2Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Exp2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Exp2Out") return retVal, err } @@ -15969,6 +16886,7 @@ func(ts *Tensor) Exp_()(err error) { lib.AtgExp_(ptr, 
ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Exp_() failed: %w", err) return err } ts.ctensor = *ptr @@ -15985,9 +16903,10 @@ func(ts *Tensor) ExpOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgExpOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ExpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ExpOut") return retVal, err } @@ -16004,9 +16923,10 @@ cimplicit := int32(0) if implicit { cimplicit = int32(1) } lib.AtgExpand(ptr, ts.ctensor, size, sizeLen, cimplicit) if err = TorchErr(); err != nil { + err = fmt.Errorf("Expand() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Expand") return retVal, err } @@ -16020,9 +16940,10 @@ func(ts *Tensor) ExpandAs(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ExpandAs() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ExpandAs") return retVal, err } @@ -16039,9 +16960,10 @@ cimplicit := int32(0) if implicit { cimplicit = int32(1) } lib.AtgExpandCopy(ptr, ts.ctensor, size, sizeLen, cimplicit) if err = TorchErr(); err != nil { + err = fmt.Errorf("ExpandCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ExpandCopy") return retVal, err } @@ -16058,9 +16980,10 @@ cimplicit := int32(0) if implicit { cimplicit = int32(1) } lib.AtgExpandCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, cimplicit) if err = TorchErr(); err != nil { + err = fmt.Errorf("ExpandCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ExpandCopyOut") return retVal, err } @@ -16074,9 +16997,10 @@ func(ts *Tensor) Expm1(del bool)(retVal *Tensor, err error) { lib.AtgExpm1(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Expm1() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Expm1") return retVal, err } @@ -16089,6 +17013,7 @@ func(ts *Tensor) Expm1_()(err error) { lib.AtgExpm1_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Expm1_() failed: %w", err) return err } ts.ctensor = *ptr @@ -16105,9 +17030,10 @@ func(ts *Tensor) Expm1Out(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgExpm1Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Expm1Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Expm1Out") return retVal, err } @@ -16121,9 +17047,10 @@ func(ts *Tensor) Exponential(lambd float64, del bool)(retVal *Tensor, err error) lib.AtgExponential(ptr, ts.ctensor, lambd) if err = TorchErr(); err != nil { + err = fmt.Errorf("Exponential() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Exponential") return retVal, err } @@ -16136,6 +17063,7 @@ func(ts *Tensor) Exponential_(lambd float64)(err error) { lib.AtgExponential_(ptr, ts.ctensor, lambd) if err = TorchErr(); err != nil { + err = fmt.Errorf("Exponential_() failed: %w", err) return err } ts.ctensor = *ptr @@ -16152,9 +17080,10 @@ func(ts *Tensor) ExponentialOut(out *Tensor, lambd float64, del bool)(retVal *Te lib.AtgExponentialOut(ptr, out.ctensor, ts.ctensor, lambd) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("ExponentialOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ExponentialOut") return retVal, err } @@ -16167,9 +17096,10 @@ func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *T lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Eye() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Eye") return retVal, err } @@ -16182,9 +17112,10 @@ func EyeM(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device) lib.AtgEyeM(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("EyeM() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EyeM") return retVal, err } @@ -16197,9 +17128,10 @@ func EyeMOut(out *Tensor, n int64, m int64)(retVal *Tensor, err error) { lib.AtgEyeMOut(ptr, out.ctensor, n, m) if err = TorchErr(); err != nil { + err = fmt.Errorf("EyeMOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EyeMOut") return retVal, err } @@ -16212,9 +17144,10 @@ func EyeOut(out *Tensor, n int64)(retVal *Tensor, err error) { lib.AtgEyeOut(ptr, out.ctensor, n) if err = TorchErr(); err != nil { + err = fmt.Errorf("EyeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "EyeOut") return retVal, err } @@ -16228,9 +17161,10 @@ func(ts *Tensor) FakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerChannelAffine() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FakeQuantizePerChannelAffine") return retVal, err } @@ -16244,10 +17178,11 @@ func(ts *Tensor) FakeQuantizePerChannelAffineCachemask(scale *Tensor, zeroPoint lib.AtgFakeQuantizePerChannelAffineCachemask(ctensorPtr0, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerChannelAffineCachemask() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FakeQuantizePerChannelAffineCachemask_0") + retVal1 = newTensor(*ctensorPtr1, "FakeQuantizePerChannelAffineCachemask_1") return retVal0, retVal1, err } @@ -16260,9 +17195,10 @@ func FakeQuantizePerChannelAffineCachemaskBackward(grad *Tensor, mask *Tensor)(r lib.AtgFakeQuantizePerChannelAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerChannelAffineCachemaskBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FakeQuantizePerChannelAffineCachemaskBackward") return retVal, err } @@ -16276,10 +17212,11 @@ func(ts *Tensor) FakeQuantizePerChannelAffineCachemaskOut(out0 *Tensor, out1 *Te lib.AtgFakeQuantizePerChannelAffineCachemaskOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerChannelAffineCachemaskOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: 
*ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FakeQuantizePerChannelAffineCachemaskOut_0") + retVal1 = newTensor(*ctensorPtr1, "FakeQuantizePerChannelAffineCachemaskOut_1") return retVal0, retVal1, err } @@ -16293,9 +17230,10 @@ func(ts *Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, qua lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerTensorAffine() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FakeQuantizePerTensorAffine") return retVal, err } @@ -16309,10 +17247,11 @@ func(ts *Tensor) FakeQuantizePerTensorAffineCachemask(scale float64, zeroPoint i lib.AtgFakeQuantizePerTensorAffineCachemask(ctensorPtr0, ts.ctensor, scale, zeroPoint, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerTensorAffineCachemask() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FakeQuantizePerTensorAffineCachemask_0") + retVal1 = newTensor(*ctensorPtr1, "FakeQuantizePerTensorAffineCachemask_1") return retVal0, retVal1, err } @@ -16325,9 +17264,10 @@ func FakeQuantizePerTensorAffineCachemaskBackward(grad *Tensor, mask *Tensor)(re lib.AtgFakeQuantizePerTensorAffineCachemaskBackward(ptr, grad.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerTensorAffineCachemaskBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FakeQuantizePerTensorAffineCachemaskBackward") return retVal, err } @@ -16341,10 +17281,11 @@ func(ts *Tensor) FakeQuantizePerTensorAffineCachemaskOut(out0 *Tensor, out1 *Ten lib.AtgFakeQuantizePerTensorAffineCachemaskOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, scale, zeroPoint, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerTensorAffineCachemaskOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FakeQuantizePerTensorAffineCachemaskOut_0") + retVal1 = newTensor(*ctensorPtr1, "FakeQuantizePerTensorAffineCachemaskOut_1") return retVal0, retVal1, err } @@ -16358,9 +17299,10 @@ func(ts *Tensor) FakeQuantizePerTensorAffineTensorQparams(scale *Tensor, zeroPoi lib.AtgFakeQuantizePerTensorAffineTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax) if err = TorchErr(); err != nil { + err = fmt.Errorf("FakeQuantizePerTensorAffineTensorQparams() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FakeQuantizePerTensorAffineTensorQparams") return retVal, err } @@ -16373,9 +17315,10 @@ func FbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor)(r lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FbgemmLinearFp16Weight() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FbgemmLinearFp16Weight") return retVal, err } @@ -16388,9 +17331,10 @@ func FbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, b lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) 
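// NOTE. Multi-output bindings such as FakeQuantizePerTensorAffineCachemask
// above register each returned tensor under an indexed name ("..._0",
// "..._1"), so every Go wrapper produced by a single call is tracked
// independently and a leak report can point at the exact output:
//
//	retVal0 = newTensor(*ctensorPtr0, "FakeQuantizePerTensorAffineCachemask_0")
//	retVal1 = newTensor(*ctensorPtr1, "FakeQuantizePerTensorAffineCachemask_1")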
if err = TorchErr(); err != nil { + err = fmt.Errorf("FbgemmLinearFp16WeightFp32Activation() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FbgemmLinearFp16WeightFp32Activation") return retVal, err } @@ -16403,9 +17347,10 @@ func FbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOf lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FbgemmLinearInt8Weight() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FbgemmLinearInt8Weight") return retVal, err } @@ -16418,9 +17363,10 @@ func FbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FbgemmLinearInt8WeightFp32Activation() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FbgemmLinearInt8WeightFp32Activation") return retVal, err } @@ -16433,9 +17379,10 @@ func FbgemmPackGemmMatrixFp16(input *Tensor)(retVal *Tensor, err error) { lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FbgemmPackGemmMatrixFp16() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FbgemmPackGemmMatrixFp16") return retVal, err } @@ -16448,9 +17395,10 @@ func FbgemmPackQuantizedMatrix(input *Tensor)(retVal *Tensor, err error) { lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FbgemmPackQuantizedMatrix() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FbgemmPackQuantizedMatrix") return retVal, err } @@ -16463,9 +17411,10 @@ func FbgemmPackQuantizedMatrixKn(input *Tensor, k int64, n int64)(retVal *Tensor lib.AtgFbgemmPackQuantizedMatrixKn(ptr, input.ctensor, k, n) if err = TorchErr(); err != nil { + err = fmt.Errorf("FbgemmPackQuantizedMatrixKn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FbgemmPackQuantizedMatrixKn") return retVal, err } @@ -16480,9 +17429,10 @@ func FeatureAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor, e if train { ctrain = int32(1) } lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("FeatureAlphaDropout() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FeatureAlphaDropout") return retVal, err } @@ -16497,6 +17447,7 @@ func(ts *Tensor) FeatureAlphaDropout_(p float64, train bool)(err error) { if train { ctrain = int32(1) } lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("FeatureAlphaDropout_() failed: %w", err) return err } ts.ctensor = *ptr @@ -16514,9 +17465,10 @@ func FeatureDropout(input *Tensor, p float64, train bool)(retVal *Tensor, err er if train { ctrain = int32(1) } lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("FeatureDropout() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"FeatureDropout") return retVal, err } @@ -16531,6 +17483,7 @@ func(ts *Tensor) FeatureDropout_(p float64, train bool)(err error) { if train { ctrain = int32(1) } lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("FeatureDropout_() failed: %w", err) return err } ts.ctensor = *ptr @@ -16553,9 +17506,10 @@ func(ts *Tensor) FftFft(n []int64, dim int64, norm string, del bool)(retVal *Ten } lib.AtgFftFft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFft() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFft") return retVal, err } @@ -16571,9 +17525,10 @@ func(ts *Tensor) FftFft2(s []int64, dim []int64, norm string, del bool)(retVal * dimLen := len(dim) lib.AtgFftFft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFft2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFft2") return retVal, err } @@ -16589,9 +17544,10 @@ func(ts *Tensor) FftFft2Out(out *Tensor, s []int64, dim []int64, norm string, de dimLen := len(dim) lib.AtgFftFft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFft2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFft2Out") return retVal, err } @@ -16611,9 +17567,10 @@ func(ts *Tensor) FftFftOut(out *Tensor, n []int64, dim int64, norm string, del b } lib.AtgFftFftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFftOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFftOut") return retVal, err } @@ -16626,9 +17583,10 @@ func FftFftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotch lib.AtgFftFftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFftfreq() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFftfreq") return retVal, err } @@ -16641,9 +17599,10 @@ func FftFftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor, err error) { lib.AtgFftFftfreqOut(ptr, out.ctensor, n, d) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFftfreqOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFftfreqOut") return retVal, err } @@ -16659,9 +17618,10 @@ func(ts *Tensor) FftFftn(s []int64, dim []int64, norm string, del bool)(retVal * dimLen := len(dim) lib.AtgFftFftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFftn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFftn") return retVal, err } @@ -16677,9 +17637,10 @@ func(ts *Tensor) FftFftnOut(out *Tensor, s []int64, dim []int64, norm string, de dimLen := len(dim) lib.AtgFftFftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFftnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFftnOut") return retVal, err } @@ -16694,9 +17655,10 @@ func(ts *Tensor) FftFftshift(dim []int64, del bool)(retVal *Tensor, err error) { dimLen := len(dim) lib.AtgFftFftshift(ptr, ts.ctensor, dim, dimLen) 
if err = TorchErr(); err != nil { + err = fmt.Errorf("FftFftshift() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftFftshift") return retVal, err } @@ -16716,9 +17678,10 @@ func(ts *Tensor) FftHfft(n []int64, dim int64, norm string, del bool)(retVal *Te } lib.AtgFftHfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftHfft() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftHfft") return retVal, err } @@ -16734,9 +17697,10 @@ func(ts *Tensor) FftHfft2(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftHfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftHfft2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftHfft2") return retVal, err } @@ -16752,9 +17716,10 @@ func(ts *Tensor) FftHfft2Out(out *Tensor, s []int64, dim []int64, norm string, d dimLen := len(dim) lib.AtgFftHfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftHfft2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftHfft2Out") return retVal, err } @@ -16774,9 +17739,10 @@ func(ts *Tensor) FftHfftOut(out *Tensor, n []int64, dim int64, norm string, del } lib.AtgFftHfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftHfftOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftHfftOut") return retVal, err } @@ -16792,9 +17758,10 @@ func(ts *Tensor) FftHfftn(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftHfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftHfftn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftHfftn") return retVal, err } @@ -16810,9 +17777,10 @@ func(ts *Tensor) FftHfftnOut(out *Tensor, s []int64, dim []int64, norm string, d dimLen := len(dim) lib.AtgFftHfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftHfftnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftHfftnOut") return retVal, err } @@ -16832,9 +17800,10 @@ func(ts *Tensor) FftIfft(n []int64, dim int64, norm string, del bool)(retVal *Te } lib.AtgFftIfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIfft() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIfft") return retVal, err } @@ -16850,9 +17819,10 @@ func(ts *Tensor) FftIfft2(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftIfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIfft2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIfft2") return retVal, err } @@ -16868,9 +17838,10 @@ func(ts *Tensor) FftIfft2Out(out *Tensor, s []int64, dim []int64, norm string, d dimLen := len(dim) lib.AtgFftIfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIfft2Out() failed: %w", 
err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIfft2Out") return retVal, err } @@ -16890,9 +17861,10 @@ func(ts *Tensor) FftIfftOut(out *Tensor, n []int64, dim int64, norm string, del } lib.AtgFftIfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIfftOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIfftOut") return retVal, err } @@ -16908,9 +17880,10 @@ func(ts *Tensor) FftIfftn(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftIfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIfftn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIfftn") return retVal, err } @@ -16926,9 +17899,10 @@ func(ts *Tensor) FftIfftnOut(out *Tensor, s []int64, dim []int64, norm string, d dimLen := len(dim) lib.AtgFftIfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIfftnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIfftnOut") return retVal, err } @@ -16943,9 +17917,10 @@ func(ts *Tensor) FftIfftshift(dim []int64, del bool)(retVal *Tensor, err error) dimLen := len(dim) lib.AtgFftIfftshift(ptr, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIfftshift() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIfftshift") return retVal, err } @@ -16965,9 +17940,10 @@ func(ts *Tensor) FftIhfft(n []int64, dim int64, norm string, del bool)(retVal *T } lib.AtgFftIhfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIhfft() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIhfft") return retVal, err } @@ -16983,9 +17959,10 @@ func(ts *Tensor) FftIhfft2(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftIhfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIhfft2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIhfft2") return retVal, err } @@ -17001,9 +17978,10 @@ func(ts *Tensor) FftIhfft2Out(out *Tensor, s []int64, dim []int64, norm string, dimLen := len(dim) lib.AtgFftIhfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIhfft2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIhfft2Out") return retVal, err } @@ -17023,9 +18001,10 @@ func(ts *Tensor) FftIhfftOut(out *Tensor, n []int64, dim int64, norm string, del } lib.AtgFftIhfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIhfftOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIhfftOut") return retVal, err } @@ -17041,9 +18020,10 @@ func(ts *Tensor) FftIhfftn(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftIhfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIhfftn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + 
retVal = newTensor(*ptr, "FftIhfftn") return retVal, err } @@ -17059,9 +18039,10 @@ func(ts *Tensor) FftIhfftnOut(out *Tensor, s []int64, dim []int64, norm string, dimLen := len(dim) lib.AtgFftIhfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIhfftnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIhfftnOut") return retVal, err } @@ -17081,9 +18062,10 @@ func(ts *Tensor) FftIrfft(n []int64, dim int64, norm string, del bool)(retVal *T } lib.AtgFftIrfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIrfft() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIrfft") return retVal, err } @@ -17099,9 +18081,10 @@ func(ts *Tensor) FftIrfft2(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftIrfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIrfft2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIrfft2") return retVal, err } @@ -17117,9 +18100,10 @@ func(ts *Tensor) FftIrfft2Out(out *Tensor, s []int64, dim []int64, norm string, dimLen := len(dim) lib.AtgFftIrfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIrfft2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIrfft2Out") return retVal, err } @@ -17139,9 +18123,10 @@ func(ts *Tensor) FftIrfftOut(out *Tensor, n []int64, dim int64, norm string, del } lib.AtgFftIrfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIrfftOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIrfftOut") return retVal, err } @@ -17157,9 +18142,10 @@ func(ts *Tensor) FftIrfftn(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftIrfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIrfftn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIrfftn") return retVal, err } @@ -17175,9 +18161,10 @@ func(ts *Tensor) FftIrfftnOut(out *Tensor, s []int64, dim []int64, norm string, dimLen := len(dim) lib.AtgFftIrfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftIrfftnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftIrfftnOut") return retVal, err } @@ -17197,9 +18184,10 @@ func(ts *Tensor) FftRfft(n []int64, dim int64, norm string, del bool)(retVal *Te } lib.AtgFftRfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfft() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfft") return retVal, err } @@ -17215,9 +18203,10 @@ func(ts *Tensor) FftRfft2(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftRfft2(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfft2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfft2") return retVal, err } 
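// NOTE. Methods that take a trailing `del bool` (FftRfft2 above and most
// receiver methods in this file) can free their receiver once the call
// completes; the generated prologue is outside these hunks, but presumably
// reads `if del { defer ts.MustDrop() }`. A hypothetical chain that drops
// intermediates eagerly instead of waiting for the finalizer:
//
//	y, err := x.Erf(true) // x is dropped when Erf returns
//	if err != nil {
//		log.Fatal(err)
//	}
//	z, err := y.Exp(true) // y is dropped when Exp returns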
@@ -17233,9 +18222,10 @@ func(ts *Tensor) FftRfft2Out(out *Tensor, s []int64, dim []int64, norm string, d dimLen := len(dim) lib.AtgFftRfft2Out(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfft2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfft2Out") return retVal, err } @@ -17255,9 +18245,10 @@ func(ts *Tensor) FftRfftOut(out *Tensor, n []int64, dim int64, norm string, del } lib.AtgFftRfftOut(ptr, out.ctensor, ts.ctensor, cnVal, cnNull, dim, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfftOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfftOut") return retVal, err } @@ -17270,9 +18261,10 @@ func FftRfftfreq(n int64, d float64, optionsKind gotch.DType, optionsDevice gotc lib.AtgFftRfftfreq(ptr, n, d, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfftfreq() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfftfreq") return retVal, err } @@ -17285,9 +18277,10 @@ func FftRfftfreqOut(out *Tensor, n int64, d float64)(retVal *Tensor, err error) lib.AtgFftRfftfreqOut(ptr, out.ctensor, n, d) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfftfreqOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfftfreqOut") return retVal, err } @@ -17303,9 +18296,10 @@ func(ts *Tensor) FftRfftn(s []int64, dim []int64, norm string, del bool)(retVal dimLen := len(dim) lib.AtgFftRfftn(ptr, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfftn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfftn") return retVal, err } @@ -17321,9 +18315,10 @@ func(ts *Tensor) FftRfftnOut(out *Tensor, s []int64, dim []int64, norm string, d dimLen := len(dim) lib.AtgFftRfftnOut(ptr, out.ctensor, ts.ctensor, s, sLen, dim, dimLen, norm) if err = TorchErr(); err != nil { + err = fmt.Errorf("FftRfftnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FftRfftnOut") return retVal, err } @@ -17337,9 +18332,10 @@ func(ts *Tensor) Fill(value *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgFill(ptr, ts.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fill() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Fill") return retVal, err } @@ -17352,6 +18348,7 @@ func(ts *Tensor) Fill_(value *Scalar)(err error) { lib.AtgFill_(ptr, ts.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fill_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17369,6 +18366,7 @@ func(ts *Tensor) FillDiagonal_(fillValue *Scalar, wrap bool)(err error) { if wrap { cwrap = int32(1) } lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap) if err = TorchErr(); err != nil { + err = fmt.Errorf("FillDiagonal_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17385,9 +18383,10 @@ func(ts *Tensor) FillScalarOut(out *Tensor, value *Scalar, del bool)(retVal *Ten lib.AtgFillScalarOut(ptr, out.ctensor, ts.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FillScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"FillScalarOut") return retVal, err } @@ -17401,9 +18400,10 @@ func(ts *Tensor) FillTensor(value *Tensor, del bool)(retVal *Tensor, err error) lib.AtgFillTensor(ptr, ts.ctensor, value.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FillTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FillTensor") return retVal, err } @@ -17416,6 +18416,7 @@ func(ts *Tensor) FillTensor_(value *Tensor)(err error) { lib.AtgFillTensor_(ptr, ts.ctensor, value.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FillTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17432,9 +18433,10 @@ func(ts *Tensor) FillTensorOut(out *Tensor, value *Tensor, del bool)(retVal *Ten lib.AtgFillTensorOut(ptr, out.ctensor, ts.ctensor, value.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FillTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FillTensorOut") return retVal, err } @@ -17448,9 +18450,10 @@ func(ts *Tensor) Fix(del bool)(retVal *Tensor, err error) { lib.AtgFix(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fix() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Fix") return retVal, err } @@ -17463,6 +18466,7 @@ func(ts *Tensor) Fix_()(err error) { lib.AtgFix_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fix_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17479,9 +18483,10 @@ func(ts *Tensor) FixOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgFixOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FixOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FixOut") return retVal, err } @@ -17495,9 +18500,10 @@ func(ts *Tensor) Flatten(startDim int64, endDim int64, del bool)(retVal *Tensor, lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Flatten() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Flatten") return retVal, err } @@ -17512,9 +18518,10 @@ func FlattenDenseTensors(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgFlattenDenseTensors(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("FlattenDenseTensors() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FlattenDenseTensors") return retVal, err } @@ -17529,9 +18536,10 @@ func(ts *Tensor) Flip(dims []int64, del bool)(retVal *Tensor, err error) { dimsLen := len(dims) lib.AtgFlip(ptr, ts.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Flip() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Flip") return retVal, err } @@ -17546,9 +18554,10 @@ func(ts *Tensor) FlipOut(out *Tensor, dims []int64, del bool)(retVal *Tensor, er dimsLen := len(dims) lib.AtgFlipOut(ptr, out.ctensor, ts.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("FlipOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FlipOut") return retVal, err } @@ -17562,9 +18571,10 @@ func(ts *Tensor) Fliplr(del bool)(retVal *Tensor, err error) { lib.AtgFliplr(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err 
= fmt.Errorf("Fliplr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Fliplr") return retVal, err } @@ -17578,9 +18588,10 @@ func(ts *Tensor) Flipud(del bool)(retVal *Tensor, err error) { lib.AtgFlipud(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Flipud() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Flipud") return retVal, err } @@ -17594,9 +18605,10 @@ func(ts *Tensor) FloatPower(exponent *Tensor, del bool)(retVal *Tensor, err erro lib.AtgFloatPower(ptr, ts.ctensor, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPower() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloatPower") return retVal, err } @@ -17609,6 +18621,7 @@ func(ts *Tensor) FloatPower_(exponent *Scalar)(err error) { lib.AtgFloatPower_(ptr, ts.ctensor, exponent.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPower_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17624,9 +18637,10 @@ func FloatPowerScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err lib.AtgFloatPowerScalar(ptr, selfScalar.cscalar, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPowerScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloatPowerScalar") return retVal, err } @@ -17639,9 +18653,10 @@ func FloatPowerScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retV lib.AtgFloatPowerScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPowerScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloatPowerScalarOut") return retVal, err } @@ -17654,6 +18669,7 @@ func(ts *Tensor) FloatPowerTensor_(exponent *Tensor)(err error) { lib.AtgFloatPowerTensor_(ptr, ts.ctensor, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPowerTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17670,9 +18686,10 @@ func(ts *Tensor) FloatPowerTensorScalar(exponent *Scalar, del bool)(retVal *Tens lib.AtgFloatPowerTensorScalar(ptr, ts.ctensor, exponent.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPowerTensorScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloatPowerTensorScalar") return retVal, err } @@ -17686,9 +18703,10 @@ func(ts *Tensor) FloatPowerTensorScalarOut(out *Tensor, exponent *Scalar, del bo lib.AtgFloatPowerTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPowerTensorScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloatPowerTensorScalarOut") return retVal, err } @@ -17702,9 +18720,10 @@ func(ts *Tensor) FloatPowerTensorTensorOut(out *Tensor, exponent *Tensor, del bo lib.AtgFloatPowerTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloatPowerTensorTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloatPowerTensorTensorOut") return retVal, err } @@ -17718,9 +18737,10 @@ func(ts *Tensor) Floor(del bool)(retVal *Tensor, err error) { lib.AtgFloor(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("Floor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Floor") return retVal, err } @@ -17733,6 +18753,7 @@ func(ts *Tensor) Floor_()(err error) { lib.AtgFloor_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Floor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17749,9 +18770,10 @@ func(ts *Tensor) FloorDivide(other *Tensor, del bool)(retVal *Tensor, err error) lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloorDivide() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloorDivide") return retVal, err } @@ -17764,6 +18786,7 @@ func(ts *Tensor) FloorDivide_(other *Tensor)(err error) { lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloorDivide_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17780,9 +18803,10 @@ func(ts *Tensor) FloorDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Te lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloorDivideOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloorDivideOut") return retVal, err } @@ -17796,9 +18820,10 @@ func(ts *Tensor) FloorDivideScalar(other *Scalar, del bool)(retVal *Tensor, err lib.AtgFloorDivideScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloorDivideScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloorDivideScalar") return retVal, err } @@ -17811,6 +18836,7 @@ func(ts *Tensor) FloorDivideScalar_(other *Scalar)(err error) { lib.AtgFloorDivideScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloorDivideScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17827,9 +18853,10 @@ func(ts *Tensor) FloorOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FloorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FloorOut") return retVal, err } @@ -17843,9 +18870,10 @@ func(ts *Tensor) Fmax(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgFmax(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Fmax") return retVal, err } @@ -17859,9 +18887,10 @@ func(ts *Tensor) FmaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, e lib.AtgFmaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FmaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FmaxOut") return retVal, err } @@ -17875,9 +18904,10 @@ func(ts *Tensor) Fmin(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgFmin(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fmin() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Fmin") return retVal, err } @@ -17891,9 +18921,10 @@ func(ts *Tensor) FminOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, e lib.AtgFminOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("FminOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FminOut") return retVal, err } @@ -17907,9 +18938,10 @@ func(ts *Tensor) Fmod(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgFmod(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fmod() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Fmod") return retVal, err } @@ -17922,6 +18954,7 @@ func(ts *Tensor) Fmod_(other *Scalar)(err error) { lib.AtgFmod_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Fmod_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17938,9 +18971,10 @@ func(ts *Tensor) FmodScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Ten lib.AtgFmodScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FmodScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FmodScalarOut") return retVal, err } @@ -17954,9 +18988,10 @@ func(ts *Tensor) FmodTensor(other *Tensor, del bool)(retVal *Tensor, err error) lib.AtgFmodTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FmodTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FmodTensor") return retVal, err } @@ -17969,6 +19004,7 @@ func(ts *Tensor) FmodTensor_(other *Tensor)(err error) { lib.AtgFmodTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FmodTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -17985,9 +19021,10 @@ func(ts *Tensor) FmodTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Ten lib.AtgFmodTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FmodTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FmodTensorOut") return retVal, err } @@ -18001,9 +19038,10 @@ func(ts *Tensor) Frac(del bool)(retVal *Tensor, err error) { lib.AtgFrac(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Frac() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Frac") return retVal, err } @@ -18016,6 +19054,7 @@ func(ts *Tensor) Frac_()(err error) { lib.AtgFrac_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Frac_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18032,9 +19071,10 @@ func(ts *Tensor) FracOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgFracOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FracOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FracOut") return retVal, err } @@ -18050,10 +19090,11 @@ func(ts *Tensor) FractionalMaxPool2d(kernelSize []int64, outputSize []int64, ran outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool2d(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool2d() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FractionalMaxPool2d_0") + retVal1 = newTensor(*ctensorPtr1, 
"FractionalMaxPool2d_1") return retVal0, retVal1, err } @@ -18069,9 +19110,10 @@ func(ts *Tensor) FractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []in outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FractionalMaxPool2dBackward") return retVal, err } @@ -18087,9 +19129,10 @@ func(ts *Tensor) FractionalMaxPool2dBackwardGradInput(gradInput *Tensor, gradOut outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FractionalMaxPool2dBackwardGradInput") return retVal, err } @@ -18105,10 +19148,11 @@ func(ts *Tensor) FractionalMaxPool2dOutput(output *Tensor, indices *Tensor, kern outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool2dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool2dOutput() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FractionalMaxPool2dOutput_0") + retVal1 = newTensor(*ctensorPtr1, "FractionalMaxPool2dOutput_1") return retVal0, retVal1, err } @@ -18124,10 +19168,11 @@ func(ts *Tensor) FractionalMaxPool3d(kernelSize []int64, outputSize []int64, ran outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool3d(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool3d() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FractionalMaxPool3d_0") + retVal1 = newTensor(*ctensorPtr1, "FractionalMaxPool3d_1") return retVal0, retVal1, err } @@ -18143,9 +19188,10 @@ func(ts *Tensor) FractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []in outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool3dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FractionalMaxPool3dBackward") return retVal, err } @@ -18161,9 +19207,10 @@ func(ts *Tensor) FractionalMaxPool3dBackwardGradInput(gradInput *Tensor, gradOut outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool3dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FractionalMaxPool3dBackwardGradInput") return retVal, err } @@ 
-18179,10 +19226,11 @@ func(ts *Tensor) FractionalMaxPool3dOutput(output *Tensor, indices *Tensor, kern outputSizeLen := len(outputSize) lib.AtgFractionalMaxPool3dOutput(ctensorPtr0, output.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, outputSize, outputSizeLen, randomSamples.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FractionalMaxPool3dOutput() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FractionalMaxPool3dOutput_0") + retVal1 = newTensor(*ctensorPtr1, "FractionalMaxPool3dOutput_1") return retVal0, retVal1, err } @@ -18196,10 +19244,11 @@ func(ts *Tensor) Frexp(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { lib.AtgFrexp(ctensorPtr0, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Frexp() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Frexp_0") + retVal1 = newTensor(*ctensorPtr1, "Frexp_1") return retVal0, retVal1, err } @@ -18213,10 +19262,11 @@ func(ts *Tensor) FrexpTensorOut(mantissa *Tensor, exponent *Tensor, del bool)(re lib.AtgFrexpTensorOut(ctensorPtr0, mantissa.ctensor, exponent.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("FrexpTensorOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "FrexpTensorOut_0") + retVal1 = newTensor(*ctensorPtr1, "FrexpTensorOut_1") return retVal0, retVal1, err } @@ -18233,9 +19283,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgFrobeniusNorm(ptr, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("FrobeniusNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FrobeniusNorm") return retVal, err } @@ -18252,9 +19303,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("FrobeniusNormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FrobeniusNormOut") return retVal, err } @@ -18275,9 +19327,10 @@ var csizeVal int64 = 0 } lib.AtgFromFile(ptr, filename, cshared, csizeVal, csizeNull, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("FromFile() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FromFile") return retVal, err } @@ -18298,9 +19351,10 @@ var csizeVal int64 = 0 } lib.AtgFromFileOut(ptr, out.ctensor, filename, cshared, csizeVal, csizeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("FromFileOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FromFileOut") return retVal, err } @@ -18314,9 +19368,10 @@ func Full(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevic sizeLen := len(size) lib.AtgFull(ptr, size, sizeLen, fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Full() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Full") return retVal, err } @@ -18330,9 +19385,10 @@ func(ts *Tensor) 
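// Multi-result bindings such as Frexp now register each output separately,
// tagged with an indexed label ("Frexp_0", "Frexp_1"), so the two tensors can
// be traced and reclaimed independently. Usage is unchanged (sketch):
func frexpParts(x *ts.Tensor) (mantissa, exponent *ts.Tensor, err error) {
	return x.Frexp(false) // errors now arrive wrapped as "Frexp() failed: ..."
}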
FullLike(fillValue *Scalar, del bool)(retVal *Tensor, err error lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FullLike() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FullLike") return retVal, err } @@ -18346,9 +19402,10 @@ func(ts *Tensor) FullLikeOut(out *Tensor, fillValue *Scalar, del bool)(retVal *T lib.AtgFullLikeOut(ptr, out.ctensor, ts.ctensor, fillValue.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FullLikeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FullLikeOut") return retVal, err } @@ -18362,9 +19419,10 @@ func FullOut(out *Tensor, size []int64, fillValue *Scalar)(retVal *Tensor, err e sizeLen := len(size) lib.AtgFullOut(ptr, out.ctensor, size, sizeLen, fillValue.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("FullOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FullOut") return retVal, err } @@ -18382,9 +19440,10 @@ csymmetricQuant := int32(0) if symmetricQuant { csymmetricQuant = int32(1) } lib.AtgFusedMovingAvgObsFakeQuant(ptr, ts.ctensor, observerOn.ctensor, fakeQuantOn.ctensor, runningMin.ctensor, runningMax.ctensor, scale.ctensor, zeroPoint.ctensor, averagingConst, quantMin, quantMax, chAxis, cperRowFakeQuant, csymmetricQuant) if err = TorchErr(); err != nil { + err = fmt.Errorf("FusedMovingAvgObsFakeQuant() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "FusedMovingAvgObsFakeQuant") return retVal, err } @@ -18400,9 +19459,10 @@ func(ts *Tensor) Gather(dim int64, index *Tensor, sparseGrad bool, del bool)(ret if sparseGrad { csparseGrad = int32(1) } lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gather() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Gather") return retVal, err } @@ -18418,9 +19478,10 @@ func(ts *Tensor) GatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGr if sparseGrad { csparseGrad = int32(1) } lib.AtgGatherBackward(ptr, grad.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) if err = TorchErr(); err != nil { + err = fmt.Errorf("GatherBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GatherBackward") return retVal, err } @@ -18436,9 +19497,10 @@ func(ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad boo if sparseGrad { csparseGrad = int32(1) } lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) if err = TorchErr(); err != nil { + err = fmt.Errorf("GatherOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GatherOut") return retVal, err } @@ -18452,9 +19514,10 @@ func(ts *Tensor) Gcd(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgGcd(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gcd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Gcd") return retVal, err } @@ -18467,6 +19530,7 @@ func(ts *Tensor) Gcd_(other *Tensor)(err error) { lib.AtgGcd_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gcd_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18483,9 +19547,10 @@ func(ts *Tensor) GcdOut(out 
*Tensor, other *Tensor, del bool)(retVal *Tensor, er lib.AtgGcdOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GcdOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GcdOut") return retVal, err } @@ -18499,9 +19564,10 @@ func(ts *Tensor) Ge(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgGe(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ge() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Ge") return retVal, err } @@ -18514,6 +19580,7 @@ func(ts *Tensor) Ge_(other *Scalar)(err error) { lib.AtgGe_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ge_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18530,9 +19597,10 @@ func(ts *Tensor) GeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tenso lib.AtgGeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GeScalarOut") return retVal, err } @@ -18546,9 +19614,10 @@ func(ts *Tensor) GeTensor(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgGeTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GeTensor") return retVal, err } @@ -18561,6 +19630,7 @@ func(ts *Tensor) GeTensor_(other *Tensor)(err error) { lib.AtgGeTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18577,9 +19647,10 @@ func(ts *Tensor) GeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso lib.AtgGeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GeTensorOut") return retVal, err } @@ -18593,9 +19664,10 @@ func(ts *Tensor) Gelu(approximate string, del bool)(retVal *Tensor, err error) { lib.AtgGelu(ptr, ts.ctensor, approximate) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gelu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Gelu") return retVal, err } @@ -18608,6 +19680,7 @@ func(ts *Tensor) Gelu_(approximate string)(err error) { lib.AtgGelu_(ptr, ts.ctensor, approximate) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gelu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18624,9 +19697,10 @@ func(ts *Tensor) GeluBackward(gradOutput *Tensor, approximate string, del bool)( lib.AtgGeluBackward(ptr, gradOutput.ctensor, ts.ctensor, approximate) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeluBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GeluBackward") return retVal, err } @@ -18640,9 +19714,10 @@ func(ts *Tensor) GeluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, ap lib.AtgGeluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, approximate) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeluBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"GeluBackwardGradInput") return retVal, err } @@ -18656,9 +19731,10 @@ func(ts *Tensor) GeluOut(out *Tensor, approximate string, del bool)(retVal *Tens lib.AtgGeluOut(ptr, out.ctensor, ts.ctensor, approximate) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeluOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GeluOut") return retVal, err } @@ -18672,9 +19748,10 @@ func(ts *Tensor) Geometric(p float64, del bool)(retVal *Tensor, err error) { lib.AtgGeometric(ptr, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("Geometric() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Geometric") return retVal, err } @@ -18687,6 +19764,7 @@ func(ts *Tensor) Geometric_(p float64)(err error) { lib.AtgGeometric_(ptr, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("Geometric_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18703,9 +19781,10 @@ func(ts *Tensor) GeometricOut(out *Tensor, p float64, del bool)(retVal *Tensor, lib.AtgGeometricOut(ptr, out.ctensor, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeometricOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GeometricOut") return retVal, err } @@ -18719,10 +19798,11 @@ func(ts *Tensor) Geqrf(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) { lib.AtgGeqrf(ctensorPtr0, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Geqrf() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Geqrf_0") + retVal1 = newTensor(*ctensorPtr1, "Geqrf_1") return retVal0, retVal1, err } @@ -18736,10 +19816,11 @@ func(ts *Tensor) GeqrfA(a *Tensor, tau *Tensor, del bool)(retVal0 *Tensor, retVa lib.AtgGeqrfA(ctensorPtr0, a.ctensor, tau.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GeqrfA() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "GeqrfA_0") + retVal1 = newTensor(*ctensorPtr1, "GeqrfA_1") return retVal0, retVal1, err } @@ -18753,9 +19834,10 @@ func(ts *Tensor) Ger(vec2 *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgGer(ptr, ts.ctensor, vec2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ger() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Ger") return retVal, err } @@ -18769,9 +19851,10 @@ func(ts *Tensor) GerOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, err lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GerOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GerOut") return retVal, err } @@ -18785,9 +19868,10 @@ func(ts *Tensor) Glu(dim int64, del bool)(retVal *Tensor, err error) { lib.AtgGlu(ptr, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Glu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Glu") return retVal, err } @@ -18801,9 +19885,10 @@ func(ts *Tensor) GluBackward(gradOutput *Tensor, dim int64, del bool)(retVal *Te lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("GluBackward() failed: %w", 
err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GluBackward") return retVal, err } @@ -18817,9 +19902,10 @@ func(ts *Tensor) GluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, dim lib.AtgGluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("GluBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GluBackwardGradInput") return retVal, err } @@ -18832,9 +19918,10 @@ func GluBackwardJvp(gradX *Tensor, gradGlu *Tensor, x *Tensor, dgradGlu *Tensor, lib.AtgGluBackwardJvp(ptr, gradX.ctensor, gradGlu.ctensor, x.ctensor, dgradGlu.ctensor, dx.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("GluBackwardJvp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GluBackwardJvp") return retVal, err } @@ -18847,9 +19934,10 @@ func GluBackwardJvpOut(out *Tensor, gradX *Tensor, gradGlu *Tensor, x *Tensor, d lib.AtgGluBackwardJvpOut(ptr, out.ctensor, gradX.ctensor, gradGlu.ctensor, x.ctensor, dgradGlu.ctensor, dx.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("GluBackwardJvpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GluBackwardJvpOut") return retVal, err } @@ -18862,9 +19950,10 @@ func GluJvp(glu *Tensor, x *Tensor, dx *Tensor, dim int64)(retVal *Tensor, err e lib.AtgGluJvp(ptr, glu.ctensor, x.ctensor, dx.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("GluJvp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GluJvp") return retVal, err } @@ -18877,9 +19966,10 @@ func GluJvpOut(out *Tensor, glu *Tensor, x *Tensor, dx *Tensor, dim int64)(retVa lib.AtgGluJvpOut(ptr, out.ctensor, glu.ctensor, x.ctensor, dx.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("GluJvpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GluJvpOut") return retVal, err } @@ -18893,9 +19983,10 @@ func(ts *Tensor) GluOut(out *Tensor, dim int64, del bool)(retVal *Tensor, err er lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("GluOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GluOut") return retVal, err } @@ -18909,9 +20000,10 @@ func(ts *Tensor) Grad(del bool)(retVal *Tensor, err error) { lib.AtgGrad(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Grad() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Grad") return retVal, err } @@ -18925,9 +20017,10 @@ func(ts *Tensor) Greater(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgGreater(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Greater() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Greater") return retVal, err } @@ -18940,6 +20033,7 @@ func(ts *Tensor) Greater_(other *Scalar)(err error) { lib.AtgGreater_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Greater_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18956,9 +20050,10 @@ func(ts *Tensor) GreaterEqual(other *Scalar, del bool)(retVal *Tensor, err error lib.AtgGreaterEqual(ptr, ts.ctensor, other.cscalar) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("GreaterEqual() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GreaterEqual") return retVal, err } @@ -18971,6 +20066,7 @@ func(ts *Tensor) GreaterEqual_(other *Scalar)(err error) { lib.AtgGreaterEqual_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterEqual_() failed: %w", err) return err } ts.ctensor = *ptr @@ -18987,9 +20083,10 @@ func(ts *Tensor) GreaterEqualScalarOut(out *Tensor, other *Scalar, del bool)(ret lib.AtgGreaterEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterEqualScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GreaterEqualScalarOut") return retVal, err } @@ -19003,9 +20100,10 @@ func(ts *Tensor) GreaterEqualTensor(other *Tensor, del bool)(retVal *Tensor, err lib.AtgGreaterEqualTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterEqualTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GreaterEqualTensor") return retVal, err } @@ -19018,6 +20116,7 @@ func(ts *Tensor) GreaterEqualTensor_(other *Tensor)(err error) { lib.AtgGreaterEqualTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterEqualTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19034,9 +20133,10 @@ func(ts *Tensor) GreaterEqualTensorOut(out *Tensor, other *Tensor, del bool)(ret lib.AtgGreaterEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterEqualTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GreaterEqualTensorOut") return retVal, err } @@ -19050,9 +20150,10 @@ func(ts *Tensor) GreaterScalarOut(out *Tensor, other *Scalar, del bool)(retVal * lib.AtgGreaterScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GreaterScalarOut") return retVal, err } @@ -19066,9 +20167,10 @@ func(ts *Tensor) GreaterTensor(other *Tensor, del bool)(retVal *Tensor, err erro lib.AtgGreaterTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GreaterTensor") return retVal, err } @@ -19081,6 +20183,7 @@ func(ts *Tensor) GreaterTensor_(other *Tensor)(err error) { lib.AtgGreaterTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19097,9 +20200,10 @@ func(ts *Tensor) GreaterTensorOut(out *Tensor, other *Tensor, del bool)(retVal * lib.AtgGreaterTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GreaterTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GreaterTensorOut") return retVal, err } @@ -19114,9 +20218,10 @@ func GridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMo if alignCorners { calignCorners = int32(1) } lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, 
calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("GridSampler() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GridSampler") return retVal, err } @@ -19131,9 +20236,10 @@ func GridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, padding if alignCorners { calignCorners = int32(1) } lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("GridSampler2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GridSampler2d") return retVal, err } @@ -19148,9 +20254,10 @@ func GridSampler2dOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMod if alignCorners { calignCorners = int32(1) } lib.AtgGridSampler2dOut(ptr, out.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("GridSampler2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GridSampler2dOut") return retVal, err } @@ -19165,9 +20272,10 @@ func GridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, padding if alignCorners { calignCorners = int32(1) } lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("GridSampler3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GridSampler3d") return retVal, err } @@ -19182,9 +20290,10 @@ func GridSampler3dOut(out *Tensor, input *Tensor, grid *Tensor, interpolationMod if alignCorners { calignCorners = int32(1) } lib.AtgGridSampler3dOut(ptr, out.ctensor, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) if err = TorchErr(); err != nil { + err = fmt.Errorf("GridSampler3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GridSampler3dOut") return retVal, err } @@ -19199,9 +20308,10 @@ func GroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps if cudnnEnabled { ccudnnEnabled = int32(1) } lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled) if err = TorchErr(); err != nil { + err = fmt.Errorf("GroupNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GroupNorm") return retVal, err } @@ -19224,10 +20334,11 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.AtgGru(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gru() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Gru_0") + retVal1 = newTensor(*ctensorPtr1, "Gru_1") return retVal0, retVal1, err } @@ -19240,9 +20351,10 @@ func GruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, b lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GruCell() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GruCell") return retVal, err } @@ -19263,10 +20375,11 @@ cbidirectional := 
int32(0) if bidirectional { cbidirectional = int32(1) } lib.AtgGruData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) if err = TorchErr(); err != nil { + err = fmt.Errorf("GruData() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "GruData_0") + retVal1 = newTensor(*ctensorPtr1, "GruData_1") return retVal0, retVal1, err } @@ -19280,9 +20393,10 @@ func(ts *Tensor) Gt(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgGt(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Gt") return retVal, err } @@ -19295,6 +20409,7 @@ func(ts *Tensor) Gt_(other *Scalar)(err error) { lib.AtgGt_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Gt_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19311,9 +20426,10 @@ func(ts *Tensor) GtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tenso lib.AtgGtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("GtScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GtScalarOut") return retVal, err } @@ -19327,9 +20443,10 @@ func(ts *Tensor) GtTensor(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgGtTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GtTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GtTensor") return retVal, err } @@ -19342,6 +20459,7 @@ func(ts *Tensor) GtTensor_(other *Tensor)(err error) { lib.AtgGtTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GtTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19358,9 +20476,10 @@ func(ts *Tensor) GtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso lib.AtgGtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("GtTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "GtTensorOut") return retVal, err } @@ -19373,9 +20492,10 @@ func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice go lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindow() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindow") return retVal, err } @@ -19388,9 +20508,10 @@ func HammingWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error lib.AtgHammingWindowOut(ptr, out.ctensor, windowLength) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindowOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindowOut") return retVal, err } @@ -19405,9 +20526,10 @@ func HammingWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch. 
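// The trailing `del` flag on methods like GtTensor and Flatten predates this
// patch; its assumed semantics (explicitly dropping the receiver's C memory
// after the call) now combine with newTensor so that intermediates which are
// *not* dropped can still be reclaimed by the GC. Sketch under that assumption:
func chainedCompare(x, y *ts.Tensor) (*ts.Tensor, error) {
	mask, err := x.GtTensor(y, false) // keep x alive for the caller
	if err != nil {
		return nil, err
	}
	return mask.Flatten(0, -1, true) // drop the intermediate `mask` eagerly
}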
if periodic { cperiodic = int32(1) } lib.AtgHammingWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindowPeriodic() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindowPeriodic") return retVal, err } @@ -19422,9 +20544,10 @@ func HammingWindowPeriodicAlpha(windowLength int64, periodic bool, alpha float64 if periodic { cperiodic = int32(1) } lib.AtgHammingWindowPeriodicAlpha(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindowPeriodicAlpha() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindowPeriodicAlpha") return retVal, err } @@ -19439,9 +20562,10 @@ func HammingWindowPeriodicAlphaBeta(windowLength int64, periodic bool, alpha flo if periodic { cperiodic = int32(1) } lib.AtgHammingWindowPeriodicAlphaBeta(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindowPeriodicAlphaBeta() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindowPeriodicAlphaBeta") return retVal, err } @@ -19456,9 +20580,10 @@ func HammingWindowPeriodicAlphaBetaOut(out *Tensor, windowLength int64, periodic if periodic { cperiodic = int32(1) } lib.AtgHammingWindowPeriodicAlphaBetaOut(ptr, out.ctensor, windowLength, cperiodic, alpha, beta) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindowPeriodicAlphaBetaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindowPeriodicAlphaBetaOut") return retVal, err } @@ -19473,9 +20598,10 @@ func HammingWindowPeriodicAlphaOut(out *Tensor, windowLength int64, periodic boo if periodic { cperiodic = int32(1) } lib.AtgHammingWindowPeriodicAlphaOut(ptr, out.ctensor, windowLength, cperiodic, alpha) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindowPeriodicAlphaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindowPeriodicAlphaOut") return retVal, err } @@ -19490,9 +20616,10 @@ func HammingWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(re if periodic { cperiodic = int32(1) } lib.AtgHammingWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic) if err = TorchErr(); err != nil { + err = fmt.Errorf("HammingWindowPeriodicOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HammingWindowPeriodicOut") return retVal, err } @@ -19505,9 +20632,10 @@ func HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("HannWindow() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HannWindow") return retVal, err } @@ -19520,9 +20648,10 @@ func HannWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error) { lib.AtgHannWindowOut(ptr, out.ctensor, windowLength) if err = TorchErr(); err != nil { + err = fmt.Errorf("HannWindowOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HannWindowOut") return retVal, err } @@ -19537,9 +20666,10 @@ func 
HannWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.DTy if periodic { cperiodic = int32(1) } lib.AtgHannWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("HannWindowPeriodic() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HannWindowPeriodic") return retVal, err } @@ -19554,9 +20684,10 @@ func HannWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(retVa if periodic { cperiodic = int32(1) } lib.AtgHannWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic) if err = TorchErr(); err != nil { + err = fmt.Errorf("HannWindowPeriodicOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HannWindowPeriodicOut") return retVal, err } @@ -19570,9 +20701,10 @@ func(ts *Tensor) Hardshrink(del bool)(retVal *Tensor, err error) { lib.AtgHardshrink(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hardshrink() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Hardshrink") return retVal, err } @@ -19586,9 +20718,10 @@ func(ts *Tensor) HardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool)(re lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardshrinkBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardshrinkBackward") return retVal, err } @@ -19602,9 +20735,10 @@ func(ts *Tensor) HardshrinkBackwardGradInput(gradInput *Tensor, gradOut *Tensor, lib.AtgHardshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOut.ctensor, ts.ctensor, lambd.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardshrinkBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardshrinkBackwardGradInput") return retVal, err } @@ -19618,9 +20752,10 @@ func(ts *Tensor) HardshrinkOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgHardshrinkOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardshrinkOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardshrinkOut") return retVal, err } @@ -19634,9 +20769,10 @@ func(ts *Tensor) Hardsigmoid(del bool)(retVal *Tensor, err error) { lib.AtgHardsigmoid(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hardsigmoid() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Hardsigmoid") return retVal, err } @@ -19649,6 +20785,7 @@ func(ts *Tensor) Hardsigmoid_()(err error) { lib.AtgHardsigmoid_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hardsigmoid_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19665,9 +20802,10 @@ func(ts *Tensor) HardsigmoidBackward(gradOutput *Tensor, del bool)(retVal *Tenso lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardsigmoidBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardsigmoidBackward") return retVal, err } @@ -19681,9 +20819,10 @@ func(ts *Tensor) HardsigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Ten lib.AtgHardsigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) if 
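// Factory functions without a receiver (HannWindow and friends) keep their
// trailing dtype/device options and now also return GC-registered tensors.
// Minimal usage sketch, assuming the usual gotch.Float and gotch.CPU options
// from github.com/sugarme/gotch:
func makeHannWindow() (*ts.Tensor, error) {
	return ts.HannWindow(8, gotch.Float, gotch.CPU)
}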
err = TorchErr(); err != nil { + err = fmt.Errorf("HardsigmoidBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardsigmoidBackwardGradInput") return retVal, err } @@ -19697,9 +20836,10 @@ func(ts *Tensor) HardsigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardsigmoidOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardsigmoidOut") return retVal, err } @@ -19713,9 +20853,10 @@ func(ts *Tensor) Hardswish(del bool)(retVal *Tensor, err error) { lib.AtgHardswish(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hardswish() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Hardswish") return retVal, err } @@ -19728,6 +20869,7 @@ func(ts *Tensor) Hardswish_()(err error) { lib.AtgHardswish_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hardswish_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19744,9 +20886,10 @@ func(ts *Tensor) HardswishBackward(gradOutput *Tensor, del bool)(retVal *Tensor, lib.AtgHardswishBackward(ptr, gradOutput.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardswishBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardswishBackward") return retVal, err } @@ -19760,9 +20903,10 @@ func(ts *Tensor) HardswishBackwardOut(out *Tensor, gradOutput *Tensor, del bool) lib.AtgHardswishBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardswishBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardswishBackwardOut") return retVal, err } @@ -19776,9 +20920,10 @@ func(ts *Tensor) HardswishOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgHardswishOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardswishOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardswishOut") return retVal, err } @@ -19792,9 +20937,10 @@ func(ts *Tensor) Hardtanh(del bool)(retVal *Tensor, err error) { lib.AtgHardtanh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hardtanh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Hardtanh") return retVal, err } @@ -19807,6 +20953,7 @@ func(ts *Tensor) Hardtanh_()(err error) { lib.AtgHardtanh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hardtanh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19823,9 +20970,10 @@ func(ts *Tensor) HardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Sc lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardtanhBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardtanhBackward") return retVal, err } @@ -19839,9 +20987,10 @@ func(ts *Tensor) HardtanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor lib.AtgHardtanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardtanhBackwardGradInput() 
failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardtanhBackwardGradInput") return retVal, err } @@ -19855,9 +21004,10 @@ func(ts *Tensor) HardtanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HardtanhOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HardtanhOut") return retVal, err } @@ -19871,9 +21021,10 @@ func(ts *Tensor) Heaviside(values *Tensor, del bool)(retVal *Tensor, err error) lib.AtgHeaviside(ptr, ts.ctensor, values.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Heaviside() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Heaviside") return retVal, err } @@ -19886,6 +21037,7 @@ func(ts *Tensor) Heaviside_(values *Tensor)(err error) { lib.AtgHeaviside_(ptr, ts.ctensor, values.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Heaviside_() failed: %w", err) return err } ts.ctensor = *ptr @@ -19902,9 +21054,10 @@ func(ts *Tensor) HeavisideOut(out *Tensor, values *Tensor, del bool)(retVal *Ten lib.AtgHeavisideOut(ptr, out.ctensor, ts.ctensor, values.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HeavisideOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HeavisideOut") return retVal, err } @@ -19918,9 +21071,10 @@ func(ts *Tensor) HingeEmbeddingLoss(target *Tensor, margin float64, reduction in lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("HingeEmbeddingLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HingeEmbeddingLoss") return retVal, err } @@ -19934,9 +21088,10 @@ func(ts *Tensor) Histc(bins int64, del bool)(retVal *Tensor, err error) { lib.AtgHistc(ptr, ts.ctensor, bins) if err = TorchErr(); err != nil { + err = fmt.Errorf("Histc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Histc") return retVal, err } @@ -19950,9 +21105,10 @@ func(ts *Tensor) HistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor, err lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins) if err = TorchErr(); err != nil { + err = fmt.Errorf("HistcOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HistcOut") return retVal, err } @@ -19965,9 +21121,10 @@ func Hspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor, err error) { lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hspmm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Hspmm") return retVal, err } @@ -19980,9 +21137,10 @@ func HspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor)(retVal *Tensor, err error lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HspmmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HspmmOut") return retVal, err } @@ -19997,9 +21155,10 @@ func Hstack(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgHstack(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hstack() failed: %w", err) 
return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Hstack") return retVal, err } @@ -20014,9 +21173,10 @@ func HstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgHstackOut(ptr, out.ctensor, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("HstackOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HstackOut") return retVal, err } @@ -20030,9 +21190,10 @@ func(ts *Tensor) HuberLoss(target *Tensor, reduction int64, delta float64, del b lib.AtgHuberLoss(ptr, ts.ctensor, target.ctensor, reduction, delta) if err = TorchErr(); err != nil { + err = fmt.Errorf("HuberLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HuberLoss") return retVal, err } @@ -20046,9 +21207,10 @@ func(ts *Tensor) HuberLossBackward(gradOutput *Tensor, target *Tensor, reduction lib.AtgHuberLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta) if err = TorchErr(); err != nil { + err = fmt.Errorf("HuberLossBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HuberLossBackward") return retVal, err } @@ -20062,9 +21224,10 @@ func(ts *Tensor) HuberLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, tar lib.AtgHuberLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, delta) if err = TorchErr(); err != nil { + err = fmt.Errorf("HuberLossBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HuberLossBackwardOut") return retVal, err } @@ -20078,9 +21241,10 @@ func(ts *Tensor) HuberLossOut(out *Tensor, target *Tensor, reduction int64, delt lib.AtgHuberLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, delta) if err = TorchErr(); err != nil { + err = fmt.Errorf("HuberLossOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HuberLossOut") return retVal, err } @@ -20094,9 +21258,10 @@ func(ts *Tensor) Hypot(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgHypot(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hypot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Hypot") return retVal, err } @@ -20109,6 +21274,7 @@ func(ts *Tensor) Hypot_(other *Tensor)(err error) { lib.AtgHypot_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Hypot_() failed: %w", err) return err } ts.ctensor = *ptr @@ -20125,9 +21291,10 @@ func(ts *Tensor) HypotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, lib.AtgHypotOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("HypotOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "HypotOut") return retVal, err } @@ -20141,9 +21308,10 @@ func(ts *Tensor) I0(del bool)(retVal *Tensor, err error) { lib.AtgI0(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("I0() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "I0") return retVal, err } @@ -20156,6 +21324,7 @@ func(ts *Tensor) I0_()(err error) { lib.AtgI0_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = 
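// Each op comes in up to three generated flavors; only the allocating ones
// pass through newTensor, while in-place methods keep mutating the receiver.
// Illustrated with Hypot, using the signatures from this diff (sketch):
func hypotFlavors(x, y, out *ts.Tensor) error {
	z, err := x.Hypot(y, false) // allocates a new result; registered as "Hypot"
	if err != nil {
		return err
	}
	_ = z
	if err := x.Hypot_(y); err != nil { // in-place: mutates x, no new tensor
		return err
	}
	z2, err := x.HypotOut(out, y, false) // writes into the caller's `out`
	if err != nil {
		return err
	}
	_ = z2
	return nil
}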
fmt.Errorf("I0_() failed: %w", err) return err } ts.ctensor = *ptr @@ -20172,9 +21341,10 @@ func(ts *Tensor) I0Out(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgI0Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("I0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "I0Out") return retVal, err } @@ -20188,9 +21358,10 @@ func(ts *Tensor) Igamma(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgIgamma(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Igamma() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Igamma") return retVal, err } @@ -20203,6 +21374,7 @@ func(ts *Tensor) Igamma_(other *Tensor)(err error) { lib.AtgIgamma_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Igamma_() failed: %w", err) return err } ts.ctensor = *ptr @@ -20219,9 +21391,10 @@ func(ts *Tensor) IgammaOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, lib.AtgIgammaOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("IgammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "IgammaOut") return retVal, err } @@ -20235,9 +21408,10 @@ func(ts *Tensor) Igammac(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgIgammac(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Igammac() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Igammac") return retVal, err } @@ -20250,6 +21424,7 @@ func(ts *Tensor) Igammac_(other *Tensor)(err error) { lib.AtgIgammac_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Igammac_() failed: %w", err) return err } ts.ctensor = *ptr @@ -20266,9 +21441,10 @@ func(ts *Tensor) IgammacOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor lib.AtgIgammacOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("IgammacOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "IgammacOut") return retVal, err } @@ -20286,9 +21462,10 @@ paddingLen := len(padding) strideLen := len(stride) lib.AtgIm2col(ptr, ts.ctensor, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Im2col() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Im2col") return retVal, err } @@ -20306,9 +21483,10 @@ paddingLen := len(padding) strideLen := len(stride) lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, dilation, dilationLen, padding, paddingLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Im2colOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Im2colOut") return retVal, err } @@ -20322,9 +21500,10 @@ func(ts *Tensor) Imag(del bool)(retVal *Tensor, err error) { lib.AtgImag(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Imag() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Imag") return retVal, err } @@ -20338,9 +21517,10 @@ func(ts *Tensor) IndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(re lib.AtgIndexAdd(ptr, 
 lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexAdd() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexAdd")
 return retVal, err
}
@@ -20353,6 +21533,7 @@ func(ts *Tensor) IndexAdd_(dim int64, index *Tensor, source *Tensor)(err error)
 lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexAdd_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -20369,9 +21550,10 @@ func(ts *Tensor) IndexAddOut(out *Tensor, dim int64, index *Tensor, source *Tens
 lib.AtgIndexAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexAddOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexAddOut")
 return retVal, err
}
@@ -20385,9 +21567,10 @@ func(ts *Tensor) IndexCopy(dim int64, index *Tensor, source *Tensor, del bool)(r
 lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexCopy() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexCopy")
 return retVal, err
}
@@ -20400,6 +21583,7 @@ func(ts *Tensor) IndexCopy_(dim int64, index *Tensor, source *Tensor)(err error)
 lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexCopy_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -20416,9 +21600,10 @@ func(ts *Tensor) IndexCopyOut(out *Tensor, dim int64, index *Tensor, source *Ten
 lib.AtgIndexCopyOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexCopyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexCopyOut")
 return retVal, err
}
@@ -20432,9 +21617,10 @@ func(ts *Tensor) IndexFill(dim int64, index *Tensor, value *Scalar, del bool)(re
 lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexFill() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexFill")
 return retVal, err
}
@@ -20447,6 +21633,7 @@ func(ts *Tensor) IndexFill_(dim int64, index *Tensor, value *Scalar)(err error)
 lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexFill_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -20463,9 +21650,10 @@ func(ts *Tensor) IndexFillIntScalarOut(out *Tensor, dim int64, index *Tensor, va
 lib.AtgIndexFillIntScalarOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexFillIntScalarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexFillIntScalarOut")
 return retVal, err
}
@@ -20479,9 +21667,10 @@ func(ts *Tensor) IndexFillIntTensor(dim int64, index *Tensor, value *Tensor, del
 lib.AtgIndexFillIntTensor(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexFillIntTensor() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexFillIntTensor")
 return retVal, err
}
@@ -20494,6 +21683,7 @@ func(ts *Tensor) IndexFillIntTensor_(dim int64, index *Tensor, value *Tensor)(er
 lib.AtgIndexFillIntTensor_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexFillIntTensor_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -20510,9 +21700,10 @@ func(ts *Tensor) IndexFillIntTensorOut(out *Tensor, dim int64, index *Tensor, va
 lib.AtgIndexFillIntTensorOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexFillIntTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexFillIntTensorOut")
 return retVal, err
}
@@ -20530,9 +21721,10 @@ caccumulate := int32(0)
 if accumulate { caccumulate = int32(1) }
 lib.AtgIndexPutOut(ptr, out.ctensor, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexPutOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexPutOut")
 return retVal, err
}
@@ -20548,9 +21740,10 @@ func(ts *Tensor) IndexReduce(dim int64, index *Tensor, source *Tensor, reduce st
 if includeSelf { cincludeSelf = int32(1) }
 lib.AtgIndexReduce(ptr, ts.ctensor, dim, index.ctensor, source.ctensor, reduce, cincludeSelf)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexReduce() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexReduce")
 return retVal, err
}
@@ -20565,6 +21758,7 @@ func(ts *Tensor) IndexReduce_(dim int64, index *Tensor, source *Tensor, reduce s
 if includeSelf { cincludeSelf = int32(1) }
 lib.AtgIndexReduce_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor, reduce, cincludeSelf)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexReduce_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -20583,9 +21777,10 @@ func(ts *Tensor) IndexReduceOut(out *Tensor, dim int64, index *Tensor, source *T
 if includeSelf { cincludeSelf = int32(1) }
 lib.AtgIndexReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, source.ctensor, reduce, cincludeSelf)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexReduceOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexReduceOut")
 return retVal, err
}
@@ -20599,9 +21794,10 @@ func(ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor,
 lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexSelect() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexSelect")
 return retVal, err
}
@@ -20615,9 +21811,10 @@ func IndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tens
 selfSizesLen := len(selfSizes)
 lib.AtgIndexSelectBackward(ptr, grad.ctensor, selfSizes, selfSizesLen, dim, index.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexSelectBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexSelectBackward")
 return retVal, err
}
@@ -20631,9 +21828,10 @@ func(ts *Tensor) IndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool)
 lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexSelectOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexSelectOut")
 return retVal, err
}
@@ -20649,9 +21847,10 @@ func(ts *Tensor) IndexTensorOut(out *Tensor, indices []*Tensor, del bool)(retVal
 for _, t := range indices {cindices = append(cindices, t.ctensor)}
 lib.AtgIndexTensorOut(ptr, out.ctensor, ts.ctensor, cindices, len(cindices))
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndexTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndexTensorOut")
 return retVal, err
}
@@ -20665,9 +21864,10 @@ func(ts *Tensor) Indices(del bool)(retVal *Tensor, err error) {
 lib.AtgIndices(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Indices() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Indices")
 return retVal, err
}
@@ -20681,9 +21881,10 @@ func(ts *Tensor) IndicesCopy(del bool)(retVal *Tensor, err error) {
 lib.AtgIndicesCopy(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndicesCopy() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndicesCopy")
 return retVal, err
}
@@ -20697,9 +21898,10 @@ func(ts *Tensor) IndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error
 lib.AtgIndicesCopyOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IndicesCopyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IndicesCopyOut")
 return retVal, err
}
@@ -20713,9 +21915,10 @@ func(ts *Tensor) InfinitelyDifferentiableGeluBackward(grad *Tensor, del bool)(re
 lib.AtgInfinitelyDifferentiableGeluBackward(ptr, grad.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("InfinitelyDifferentiableGeluBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "InfinitelyDifferentiableGeluBackward")
 return retVal, err
}
@@ -20729,9 +21932,10 @@ func(ts *Tensor) Inner(other *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgInner(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Inner() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Inner")
 return retVal, err
}
@@ -20745,9 +21949,10 @@ func(ts *Tensor) InnerOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor,
 lib.AtgInnerOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("InnerOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "InnerOut")
 return retVal, err
}
@@ -20764,9 +21969,10 @@ ccudnnEnabled := int32(0)
 if cudnnEnabled { ccudnnEnabled = int32(1) }
 lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("InstanceNorm() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "InstanceNorm")
 return retVal, err
}
@@ -20780,9 +21986,10 @@ func(ts *Tensor) IntRepr(del bool)(retVal *Tensor, err error) {
 lib.AtgIntRepr(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IntRepr() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IntRepr")
 return retVal, err
}
@@ -20796,9 +22003,10 @@ func(ts *Tensor) IntReprOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgIntReprOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IntReprOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IntReprOut")
 return retVal, err
}
@@ -20812,9 +22020,10 @@ func(ts *Tensor) Inverse(del bool)(retVal *Tensor, err error) {
 lib.AtgInverse(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Inverse() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Inverse")
 return retVal, err
}
@@ -20828,9 +22037,10 @@ func(ts *Tensor) InverseOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("InverseOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "InverseOut")
 return retVal, err
}
@@ -20842,6 +22052,7 @@ func(ts *Tensor) IsCoalesced(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsCoalesced(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsCoalesced() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20854,6 +22065,7 @@ func(ts *Tensor) IsComplex(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsComplex(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsComplex() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20866,6 +22078,7 @@ func(ts *Tensor) IsConj(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsConj(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsConj() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20878,6 +22091,7 @@ func(ts *Tensor) IsDistributed(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsDistributed(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsDistributed() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20890,6 +22104,7 @@ func(ts *Tensor) IsFloatingPoint(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsFloatingPoint(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsFloatingPoint() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20902,6 +22117,7 @@ func(ts *Tensor) IsInference(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsInference(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsInference() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20914,6 +22130,7 @@ func(ts *Tensor) IsLeaf(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsLeaf(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsLeaf() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20926,6 +22143,7 @@ func(ts *Tensor) IsNeg(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsNeg(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsNeg() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20938,6 +22156,7 @@ func(ts *Tensor) IsNonzero(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsNonzero(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsNonzero() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20950,6 +22169,7 @@ func(ts *Tensor) IsPinned(device gotch.Device, del bool)(retVal bool, err error)
 retVal = lib.AtgIsPinned(ts.ctensor, device.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsPinned() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20962,6 +22182,7 @@ func(ts *Tensor) IsSameSize(other *Tensor, del bool)(retVal bool, err error) {
 retVal = lib.AtgIsSameSize(ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsSameSize() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20974,6 +22195,7 @@ func(ts *Tensor) IsSetTo(tensor *Tensor, del bool)(retVal bool, err error) {
 retVal = lib.AtgIsSetTo(ts.ctensor, tensor.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsSetTo() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20986,6 +22208,7 @@ func(ts *Tensor) IsSigned(del bool)(retVal bool, err error) {
 retVal = lib.AtgIsSigned(ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsSigned() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -20997,6 +22220,7 @@ func IsVulkanAvailable()(retVal bool, err error) {
 retVal = lib.AtgIsVulkanAvailable()
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsVulkanAvailable() failed: %w", err)
 return retVal, err
 }
 return retVal, err
@@ -21013,9 +22237,10 @@ func(ts *Tensor) Isclose(other *Tensor, rtol float64, atol float64, equalNan boo
 if equalNan { cequalNan = int32(1) }
 lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isclose() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isclose")
 return retVal, err
}
@@ -21029,9 +22254,10 @@ func(ts *Tensor) Isfinite(del bool)(retVal *Tensor, err error) {
 lib.AtgIsfinite(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isfinite() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isfinite")
 return retVal, err
}
@@ -21048,9 +22274,10 @@ cinvert := int32(0)
 if invert { cinvert = int32(1) }
 lib.AtgIsin(ptr, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isin() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isin")
 return retVal, err
}
@@ -21067,9 +22294,10 @@ cinvert := int32(0)
 if invert { cinvert = int32(1) }
 lib.AtgIsinScalarTensor(ptr, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsinScalarTensor() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsinScalarTensor")
 return retVal, err
}
@@ -21086,9 +22314,10 @@ cinvert := int32(0)
 if invert { cinvert = int32(1) }
 lib.AtgIsinScalarTensorOut(ptr, out.ctensor, element.cscalar, testElements.ctensor, cassumeUnique, cinvert)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsinScalarTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsinScalarTensorOut")
 return retVal, err
}
@@ -21105,9 +22334,10 @@ cinvert := int32(0)
 if invert { cinvert = int32(1) }
 lib.AtgIsinTensorScalar(ptr, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsinTensorScalar() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsinTensorScalar")
 return retVal, err
}
@@ -21124,9 +22354,10 @@ cinvert := int32(0)
 if invert { cinvert = int32(1) }
 lib.AtgIsinTensorScalarOut(ptr, out.ctensor, elements.ctensor, testElement.cscalar, cassumeUnique, cinvert)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsinTensorScalarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsinTensorScalarOut")
 return retVal, err
}
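For context on these hunks: the newTensor() constructor that replaces the bare &Tensor{ctensor: *ptr} literal lives in ts/tensor.go and is not part of this generated file. A minimal sketch of what such a constructor can look like — assuming runtime.SetFinalizer-based cleanup and a name field kept for debugging; both are assumptions, since the real implementation is outside this diff:

package ts // hypothetical sketch; the real newTensor() is defined in ts/tensor.go

import (
	"runtime"

	lib "github.com/sugarme/gotch/libtch"
)

// newTensor wraps a C tensor handle and registers a finalizer so the
// libtorch-side allocation is released when the Go wrapper becomes
// unreachable — the "GC collection" this patch wires every generated
// wrapper into.
func newTensor(ctensor lib.Ctensor, nameOpt ...string) *Tensor {
	name := ""
	if len(nameOpt) > 0 {
		name = nameOpt[0] // e.g. "HardtanhOut" or "Kthvalue_0", set by the generated callers
	}
	x := &Tensor{ctensor: ctensor, name: name} // the name field is assumed here

	// Free the native tensor if the caller never calls MustDrop().
	runtime.SetFinalizer(x, func(t *Tensor) {
		lib.AtFree(t.ctensor)
	})
	return x
}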
@@ -21143,9 +22374,10 @@ cinvert := int32(0)
 if invert { cinvert = int32(1) }
 lib.AtgIsinTensorTensorOut(ptr, out.ctensor, elements.ctensor, testElements.ctensor, cassumeUnique, cinvert)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsinTensorTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsinTensorTensorOut")
 return retVal, err
}
@@ -21159,9 +22391,10 @@ func(ts *Tensor) Isinf(del bool)(retVal *Tensor, err error) {
 lib.AtgIsinf(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isinf() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isinf")
 return retVal, err
}
@@ -21175,9 +22408,10 @@ func(ts *Tensor) IsinfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgIsinfOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsinfOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsinfOut")
 return retVal, err
}
@@ -21191,9 +22425,10 @@ func(ts *Tensor) Isnan(del bool)(retVal *Tensor, err error) {
 lib.AtgIsnan(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isnan() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isnan")
 return retVal, err
}
@@ -21207,9 +22442,10 @@ func(ts *Tensor) IsnanOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgIsnanOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsnanOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsnanOut")
 return retVal, err
}
@@ -21223,9 +22459,10 @@ func(ts *Tensor) Isneginf(del bool)(retVal *Tensor, err error) {
 lib.AtgIsneginf(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isneginf() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isneginf")
 return retVal, err
}
@@ -21239,9 +22476,10 @@ func(ts *Tensor) IsneginfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgIsneginfOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsneginfOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsneginfOut")
 return retVal, err
}
@@ -21255,9 +22493,10 @@ func(ts *Tensor) Isposinf(del bool)(retVal *Tensor, err error) {
 lib.AtgIsposinf(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isposinf() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isposinf")
 return retVal, err
}
@@ -21271,9 +22510,10 @@ func(ts *Tensor) IsposinfOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgIsposinfOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("IsposinfOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "IsposinfOut")
 return retVal, err
}
@@ -21287,9 +22527,10 @@ func(ts *Tensor) Isreal(del bool)(retVal *Tensor, err error) {
 lib.AtgIsreal(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Isreal() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Isreal")
 return retVal, err
}
@@ -21329,9 +22570,10 @@ creturnComplex := int32(0)
 if returnComplex { creturnComplex = int32(1) }
 lib.AtgIstft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, cnormalized, conesided, clengthVal, clengthNull, creturnComplex)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Istft() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Istft")
 return retVal, err
}
@@ -21344,9 +22586,10 @@ func KaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice got
 lib.AtgKaiserWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("KaiserWindow() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "KaiserWindow")
 return retVal, err
}
@@ -21361,9 +22604,10 @@ func KaiserWindowBeta(windowLength int64, periodic bool, beta float64, optionsKi
 if periodic { cperiodic = int32(1) }
 lib.AtgKaiserWindowBeta(ptr, windowLength, cperiodic, beta, optionsKind.CInt(), optionsDevice.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("KaiserWindowBeta() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "KaiserWindowBeta")
 return retVal, err
}
@@ -21378,9 +22622,10 @@ func KaiserWindowBetaOut(out *Tensor, windowLength int64, periodic bool, beta fl
 if periodic { cperiodic = int32(1) }
 lib.AtgKaiserWindowBetaOut(ptr, out.ctensor, windowLength, cperiodic, beta)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("KaiserWindowBetaOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "KaiserWindowBetaOut")
 return retVal, err
}
@@ -21393,9 +22638,10 @@ func KaiserWindowOut(out *Tensor, windowLength int64)(retVal *Tensor, err error)
 lib.AtgKaiserWindowOut(ptr, out.ctensor, windowLength)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("KaiserWindowOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "KaiserWindowOut")
 return retVal, err
}
@@ -21410,9 +22656,10 @@ func KaiserWindowPeriodic(windowLength int64, periodic bool, optionsKind gotch.D
 if periodic { cperiodic = int32(1) }
 lib.AtgKaiserWindowPeriodic(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("KaiserWindowPeriodic() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "KaiserWindowPeriodic")
 return retVal, err
}
@@ -21427,9 +22674,10 @@ func KaiserWindowPeriodicOut(out *Tensor, windowLength int64, periodic bool)(ret
 if periodic { cperiodic = int32(1) }
 lib.AtgKaiserWindowPeriodicOut(ptr, out.ctensor, windowLength, cperiodic)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("KaiserWindowPeriodicOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "KaiserWindowPeriodicOut")
 return retVal, err
}
@@ -21445,9 +22693,10 @@ func(ts *Tensor) KlDiv(target *Tensor, reduction int64, logTarget bool, del bool
 if logTarget { clogTarget = int32(1) }
 lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction, clogTarget)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("KlDiv() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "KlDiv")
 return retVal, err
}
@@ -21461,9 +22710,10 @@ func(ts *Tensor) Kron(other *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgKron(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Kron() failed: %w", err)
 return retVal, err
 }
&Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Kron") return retVal, err } @@ -21477,9 +22727,10 @@ func(ts *Tensor) KronOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, e lib.AtgKronOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("KronOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "KronOut") return retVal, err } @@ -21495,10 +22746,11 @@ func(ts *Tensor) Kthvalue(k int64, dim int64, keepdim bool, del bool)(retVal0 *T if keepdim { ckeepdim = int32(1) } lib.AtgKthvalue(ctensorPtr0, ts.ctensor, k, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Kthvalue() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Kthvalue_0") + retVal1 = newTensor(*ctensorPtr1, "Kthvalue_1") return retVal0, retVal1, err } @@ -21514,10 +22766,11 @@ func(ts *Tensor) KthvalueValues(values *Tensor, indices *Tensor, k int64, dim in if keepdim { ckeepdim = int32(1) } lib.AtgKthvalueValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("KthvalueValues() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "KthvalueValues_0") + retVal1 = newTensor(*ctensorPtr1, "KthvalueValues_1") return retVal0, retVal1, err } @@ -21531,9 +22784,10 @@ func(ts *Tensor) L1Loss(target *Tensor, reduction int64, del bool)(retVal *Tenso lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("L1Loss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "L1Loss") return retVal, err } @@ -21549,9 +22803,10 @@ ccudnnEnable := int32(0) if cudnnEnable { ccudnnEnable = int32(1) } lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, normalizedShapeLen, weight.ctensor, bias.ctensor, eps, ccudnnEnable) if err = TorchErr(); err != nil { + err = fmt.Errorf("LayerNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LayerNorm") return retVal, err } @@ -21565,9 +22820,10 @@ func(ts *Tensor) Lcm(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLcm(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Lcm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Lcm") return retVal, err } @@ -21580,6 +22836,7 @@ func(ts *Tensor) Lcm_(other *Tensor)(err error) { lib.AtgLcm_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Lcm_() failed: %w", err) return err } ts.ctensor = *ptr @@ -21596,9 +22853,10 @@ func(ts *Tensor) LcmOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, er lib.AtgLcmOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LcmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LcmOut") return retVal, err } @@ -21612,9 +22870,10 @@ func(ts *Tensor) Ldexp(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLdexp(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ldexp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = 
+ retVal = newTensor(*ptr, "Ldexp")
 return retVal, err
}
@@ -21627,6 +22886,7 @@ func(ts *Tensor) Ldexp_(other *Tensor)(err error) {
 lib.AtgLdexp_(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Ldexp_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21643,9 +22903,10 @@ func(ts *Tensor) LdexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor,
 lib.AtgLdexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LdexpOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LdexpOut")
 return retVal, err
}
@@ -21659,9 +22920,10 @@ func(ts *Tensor) Le(other *Scalar, del bool)(retVal *Tensor, err error) {
 lib.AtgLe(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Le() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Le")
 return retVal, err
}
@@ -21674,6 +22936,7 @@ func(ts *Tensor) Le_(other *Scalar)(err error) {
 lib.AtgLe_(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Le_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21690,9 +22953,10 @@ func(ts *Tensor) LeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tenso
 lib.AtgLeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeScalarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LeScalarOut")
 return retVal, err
}
@@ -21706,9 +22970,10 @@ func(ts *Tensor) LeTensor(other *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgLeTensor(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeTensor() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LeTensor")
 return retVal, err
}
@@ -21721,6 +22986,7 @@ func(ts *Tensor) LeTensor_(other *Tensor)(err error) {
 lib.AtgLeTensor_(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeTensor_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21737,9 +23003,10 @@ func(ts *Tensor) LeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso
 lib.AtgLeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LeTensorOut")
 return retVal, err
}
@@ -21753,9 +23020,10 @@ func(ts *Tensor) LeakyRelu(del bool)(retVal *Tensor, err error) {
 lib.AtgLeakyRelu(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeakyRelu() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LeakyRelu")
 return retVal, err
}
@@ -21768,6 +23036,7 @@ func(ts *Tensor) LeakyRelu_()(err error) {
 lib.AtgLeakyRelu_(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeakyRelu_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21786,9 +23055,10 @@ func(ts *Tensor) LeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, se
 if selfIsResult { cselfIsResult = int32(1) }
 lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeakyReluBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LeakyReluBackward")
 return retVal, err
}
@@ -21804,9 +23074,10 @@ func(ts *Tensor) LeakyReluBackwardGradInput(gradInput *Tensor, gradOutput *Tenso
 if selfIsResult { cselfIsResult = int32(1) }
 lib.AtgLeakyReluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeakyReluBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LeakyReluBackwardGradInput")
 return retVal, err
}
@@ -21820,9 +23091,10 @@ func(ts *Tensor) LeakyReluOut(out *Tensor, del bool)(retVal *Tensor, err error)
 lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LeakyReluOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LeakyReluOut")
 return retVal, err
}
@@ -21836,9 +23108,10 @@ func(ts *Tensor) Lerp(end *Tensor, weight *Scalar, del bool)(retVal *Tensor, err
 lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Lerp() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Lerp")
 return retVal, err
}
@@ -21851,6 +23124,7 @@ func(ts *Tensor) Lerp_(end *Tensor, weight *Scalar)(err error) {
 lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Lerp_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21867,9 +23141,10 @@ func(ts *Tensor) LerpScalarOut(out *Tensor, end *Tensor, weight *Scalar, del boo
 lib.AtgLerpScalarOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LerpScalarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LerpScalarOut")
 return retVal, err
}
@@ -21883,9 +23158,10 @@ func(ts *Tensor) LerpTensor(end *Tensor, weight *Tensor, del bool)(retVal *Tenso
 lib.AtgLerpTensor(ptr, ts.ctensor, end.ctensor, weight.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LerpTensor() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LerpTensor")
 return retVal, err
}
@@ -21898,6 +23174,7 @@ func(ts *Tensor) LerpTensor_(end *Tensor, weight *Tensor)(err error) {
 lib.AtgLerpTensor_(ptr, ts.ctensor, end.ctensor, weight.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LerpTensor_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21914,9 +23191,10 @@ func(ts *Tensor) LerpTensorOut(out *Tensor, end *Tensor, weight *Tensor, del boo
 lib.AtgLerpTensorOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LerpTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LerpTensorOut")
 return retVal, err
}
@@ -21930,9 +23208,10 @@ func(ts *Tensor) Less(other *Scalar, del bool)(retVal *Tensor, err error) {
 lib.AtgLess(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Less() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Less")
 return retVal, err
}
@@ -21945,6 +23224,7 @@ func(ts *Tensor) Less_(other *Scalar)(err error) {
 lib.AtgLess_(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Less_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21961,9 +23241,10 @@ func(ts *Tensor) LessEqual(other *Scalar, del bool)(retVal *Tensor, err error) {
 lib.AtgLessEqual(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessEqual() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LessEqual")
 return retVal, err
}
@@ -21976,6 +23257,7 @@ func(ts *Tensor) LessEqual_(other *Scalar)(err error) {
 lib.AtgLessEqual_(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessEqual_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -21992,9 +23274,10 @@ func(ts *Tensor) LessEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal
 lib.AtgLessEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessEqualScalarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LessEqualScalarOut")
 return retVal, err
}
@@ -22008,9 +23291,10 @@ func(ts *Tensor) LessEqualTensor(other *Tensor, del bool)(retVal *Tensor, err er
 lib.AtgLessEqualTensor(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessEqualTensor() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LessEqualTensor")
 return retVal, err
}
@@ -22023,6 +23307,7 @@ func(ts *Tensor) LessEqualTensor_(other *Tensor)(err error) {
 lib.AtgLessEqualTensor_(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessEqualTensor_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -22039,9 +23324,10 @@ func(ts *Tensor) LessEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal
 lib.AtgLessEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessEqualTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LessEqualTensorOut")
 return retVal, err
}
@@ -22055,9 +23341,10 @@ func(ts *Tensor) LessScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Ten
 lib.AtgLessScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessScalarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LessScalarOut")
 return retVal, err
}
@@ -22071,9 +23358,10 @@ func(ts *Tensor) LessTensor(other *Tensor, del bool)(retVal *Tensor, err error)
 lib.AtgLessTensor(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessTensor() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LessTensor")
 return retVal, err
}
@@ -22086,6 +23374,7 @@ func(ts *Tensor) LessTensor_(other *Tensor)(err error) {
 lib.AtgLessTensor_(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessTensor_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -22102,9 +23391,10 @@ func(ts *Tensor) LessTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Ten
 lib.AtgLessTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LessTensorOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LessTensorOut")
 return retVal, err
}
@@ -22118,9 +23408,10 @@ func(ts *Tensor) Lgamma(del bool)(retVal *Tensor, err error) {
 lib.AtgLgamma(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Lgamma() failed: %w", err)
 return retVal, err
 }
retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Lgamma") return retVal, err } @@ -22133,6 +23424,7 @@ func(ts *Tensor) Lgamma_()(err error) { lib.AtgLgamma_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Lgamma_() failed: %w", err) return err } ts.ctensor = *ptr @@ -22149,9 +23441,10 @@ func(ts *Tensor) LgammaOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LgammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LgammaOut") return retVal, err } @@ -22165,9 +23458,10 @@ func(ts *Tensor) Lift(del bool)(retVal *Tensor, err error) { lib.AtgLift(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Lift() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Lift") return retVal, err } @@ -22181,9 +23475,10 @@ func(ts *Tensor) LiftFresh(del bool)(retVal *Tensor, err error) { lib.AtgLiftFresh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LiftFresh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LiftFresh") return retVal, err } @@ -22197,9 +23492,10 @@ func(ts *Tensor) LiftFreshCopy(del bool)(retVal *Tensor, err error) { lib.AtgLiftFreshCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LiftFreshCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LiftFreshCopy") return retVal, err } @@ -22213,9 +23509,10 @@ func(ts *Tensor) LiftFreshCopyOut(out *Tensor, del bool)(retVal *Tensor, err err lib.AtgLiftFreshCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LiftFreshCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LiftFreshCopyOut") return retVal, err } @@ -22229,9 +23526,10 @@ func(ts *Tensor) LiftOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLiftOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LiftOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LiftOut") return retVal, err } @@ -22247,9 +23545,10 @@ func(ts *Tensor) LinalgCholesky(upper bool, del bool)(retVal *Tensor, err error) if upper { cupper = int32(1) } lib.AtgLinalgCholesky(ptr, ts.ctensor, cupper) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgCholesky() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgCholesky") return retVal, err } @@ -22267,10 +23566,11 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.AtgLinalgCholeskyEx(ctensorPtr0, ts.ctensor, cupper, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgCholeskyEx() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgCholeskyEx_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgCholeskyEx_1") return retVal0, retVal1, err } @@ -22288,10 +23588,11 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.AtgLinalgCholeskyExL(ctensorPtr0, l.ctensor, info.ctensor, ts.ctensor, cupper, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgCholeskyExL() failed: %w", err) return retVal0, 
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgCholeskyExL_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgCholeskyExL_1")
 return retVal0, retVal1, err
}
@@ -22307,9 +23608,10 @@ func(ts *Tensor) LinalgCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Te
 if upper { cupper = int32(1) }
 lib.AtgLinalgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgCholeskyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgCholeskyOut")
 return retVal, err
}
@@ -22323,9 +23625,10 @@ func(ts *Tensor) LinalgCond(p *Scalar, del bool)(retVal *Tensor, err error) {
 lib.AtgLinalgCond(ptr, ts.ctensor, p.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgCond() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgCond")
 return retVal, err
}
@@ -22339,9 +23642,10 @@ func(ts *Tensor) LinalgCondOut(out *Tensor, p *Scalar, del bool)(retVal *Tensor,
 lib.AtgLinalgCondOut(ptr, out.ctensor, ts.ctensor, p.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgCondOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgCondOut")
 return retVal, err
}
@@ -22355,9 +23659,10 @@ func(ts *Tensor) LinalgCondPStr(p string, del bool)(retVal *Tensor, err error) {
 lib.AtgLinalgCondPStr(ptr, ts.ctensor, p)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgCondPStr() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgCondPStr")
 return retVal, err
}
@@ -22371,9 +23676,10 @@ func(ts *Tensor) LinalgCondPStrOut(out *Tensor, p string, del bool)(retVal *Tens
 lib.AtgLinalgCondPStrOut(ptr, out.ctensor, ts.ctensor, p)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgCondPStrOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgCondPStrOut")
 return retVal, err
}
@@ -22387,9 +23693,10 @@ func(ts *Tensor) LinalgCross(other *Tensor, dim int64, del bool)(retVal *Tensor,
 lib.AtgLinalgCross(ptr, ts.ctensor, other.ctensor, dim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgCross() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgCross")
 return retVal, err
}
@@ -22403,9 +23710,10 @@ func(ts *Tensor) LinalgCrossOut(out *Tensor, other *Tensor, dim int64, del bool)
 lib.AtgLinalgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgCrossOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgCrossOut")
 return retVal, err
}
@@ -22418,9 +23726,10 @@ func LinalgDet(a *Tensor)(retVal *Tensor, err error) {
 lib.AtgLinalgDet(ptr, a.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgDet() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgDet")
 return retVal, err
}
@@ -22433,9 +23742,10 @@ func LinalgDetOut(out *Tensor, a *Tensor)(retVal *Tensor, err error) {
 lib.AtgLinalgDetOut(ptr, out.ctensor, a.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgDetOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgDetOut")
 return retVal, err
}
@@ -22448,9 +23758,10 @@ func LinalgDiagonal(a *Tensor, offset int64, dim1 int64, dim2 int64)(retVal *Ten
 lib.AtgLinalgDiagonal(ptr, a.ctensor, offset, dim1, dim2)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgDiagonal() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgDiagonal")
 return retVal, err
}
@@ -22464,10 +23775,11 @@ func(ts *Tensor) LinalgEig(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error
 lib.AtgLinalgEig(ctensorPtr0, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEig() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgEig_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgEig_1")
 return retVal0, retVal1, err
}
@@ -22481,10 +23793,11 @@ func(ts *Tensor) LinalgEigOut(eigenvalues *Tensor, eigenvectors *Tensor, del boo
 lib.AtgLinalgEigOut(ctensorPtr0, eigenvalues.ctensor, eigenvectors.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEigOut() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgEigOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgEigOut_1")
 return retVal0, retVal1, err
}
@@ -22498,10 +23811,11 @@ func(ts *Tensor) LinalgEigh(uPLO string, del bool)(retVal0 *Tensor, retVal1 *Ten
 lib.AtgLinalgEigh(ctensorPtr0, ts.ctensor, uPLO)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEigh() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgEigh_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgEigh_1")
 return retVal0, retVal1, err
}
@@ -22515,10 +23829,11 @@ func(ts *Tensor) LinalgEighEigvals(eigvals *Tensor, eigvecs *Tensor, uPLO string
 lib.AtgLinalgEighEigvals(ctensorPtr0, eigvals.ctensor, eigvecs.ctensor, ts.ctensor, uPLO)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEighEigvals() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgEighEigvals_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgEighEigvals_1")
 return retVal0, retVal1, err
}
@@ -22532,9 +23847,10 @@ func(ts *Tensor) LinalgEigvals(del bool)(retVal *Tensor, err error) {
 lib.AtgLinalgEigvals(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEigvals() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgEigvals")
 return retVal, err
}
@@ -22548,9 +23864,10 @@ func(ts *Tensor) LinalgEigvalsOut(out *Tensor, del bool)(retVal *Tensor, err err
 lib.AtgLinalgEigvalsOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEigvalsOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgEigvalsOut")
 return retVal, err
}
@@ -22564,9 +23881,10 @@ func(ts *Tensor) LinalgEigvalsh(uPLO string, del bool)(retVal *Tensor, err error
 lib.AtgLinalgEigvalsh(ptr, ts.ctensor, uPLO)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEigvalsh() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgEigvalsh")
 return retVal, err
}
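Because every wrapper now wraps the TorchErr() result as "Name() failed: %w", a caller can see at a glance which binding failed while the original libtorch error stays reachable via errors.Unwrap / errors.Is. A short usage sketch; Inverse() is one of the wrappers changed in this file, while ts.MustOnes and MustSize are assumed from gotch's generated Must-helpers:

package main

import (
	"fmt"
	"log"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	a := ts.MustOnes([]int64{3, 3}, gotch.Float, gotch.CPU)
	defer a.MustDrop()

	// Inverse is a generated wrapper touched by this patch.
	inv, err := a.Inverse(false)
	if err != nil {
		// With this patch the message reads
		// "Inverse() failed: <underlying Torch error>".
		log.Fatal(err)
	}
	fmt.Println(inv.MustSize())
}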
@@ -22580,9 +23898,10 @@ func(ts *Tensor) LinalgEigvalshOut(out *Tensor, uPLO string, del bool)(retVal *T
 lib.AtgLinalgEigvalshOut(ptr, out.ctensor, ts.ctensor, uPLO)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgEigvalshOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgEigvalshOut")
 return retVal, err
}
@@ -22595,9 +23914,10 @@ func LinalgHouseholderProduct(input *Tensor, tau *Tensor)(retVal *Tensor, err er
 lib.AtgLinalgHouseholderProduct(ptr, input.ctensor, tau.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgHouseholderProduct() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgHouseholderProduct")
 return retVal, err
}
@@ -22610,9 +23930,10 @@ func LinalgHouseholderProductOut(out *Tensor, input *Tensor, tau *Tensor)(retVal
 lib.AtgLinalgHouseholderProductOut(ptr, out.ctensor, input.ctensor, tau.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgHouseholderProductOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgHouseholderProductOut")
 return retVal, err
}
@@ -22625,9 +23946,10 @@ func LinalgInv(a *Tensor)(retVal *Tensor, err error) {
 lib.AtgLinalgInv(ptr, a.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgInv() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgInv")
 return retVal, err
}
@@ -22642,10 +23964,11 @@ func LinalgInvEx(a *Tensor, checkErrors bool)(retVal0 *Tensor, retVal1 *Tensor,
 if checkErrors { ccheckErrors = int32(1) }
 lib.AtgLinalgInvEx(ctensorPtr0, a.ctensor, ccheckErrors)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgInvEx() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgInvEx_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgInvEx_1")
 return retVal0, retVal1, err
}
@@ -22660,10 +23983,11 @@ func LinalgInvExInverse(inverse *Tensor, info *Tensor, a *Tensor, checkErrors bo
 if checkErrors { ccheckErrors = int32(1) }
 lib.AtgLinalgInvExInverse(ctensorPtr0, inverse.ctensor, info.ctensor, a.ctensor, ccheckErrors)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgInvExInverse() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgInvExInverse_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgInvExInverse_1")
 return retVal0, retVal1, err
}
@@ -22676,9 +24000,10 @@ func LinalgInvOut(out *Tensor, a *Tensor)(retVal *Tensor, err error) {
 lib.AtgLinalgInvOut(ptr, out.ctensor, a.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgInvOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgInvOut")
 return retVal, err
}
@@ -22694,10 +24019,11 @@ func(ts *Tensor) LinalgLdlFactor(hermitian bool, del bool)(retVal0 *Tensor, retV
 if hermitian { chermitian = int32(1) }
 lib.AtgLinalgLdlFactor(ctensorPtr0, ts.ctensor, chermitian)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgLdlFactor() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgLdlFactor_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgLdlFactor_1")
 return retVal0, retVal1, err
}
@@ -22716,11 +24042,12 @@ ccheckErrors := int32(0)
 if checkErrors { ccheckErrors = int32(1) }
 lib.AtgLinalgLdlFactorEx(ctensorPtr0, ts.ctensor, chermitian, ccheckErrors)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgLdlFactorEx() failed: %w", err)
 return retVal0, retVal1, retVal2, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgLdlFactorEx_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgLdlFactorEx_1")
+ retVal2 = newTensor(*ctensorPtr2, "LinalgLdlFactorEx_2")
 return retVal0, retVal1, retVal2, err
}
@@ -22739,11 +24066,12 @@ ccheckErrors := int32(0)
 if checkErrors { ccheckErrors = int32(1) }
 lib.AtgLinalgLdlFactorExOut(ctensorPtr0, lD.ctensor, pivots.ctensor, info.ctensor, ts.ctensor, chermitian, ccheckErrors)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgLdlFactorExOut() failed: %w", err)
 return retVal0, retVal1, retVal2, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgLdlFactorExOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgLdlFactorExOut_1")
+ retVal2 = newTensor(*ctensorPtr2, "LinalgLdlFactorExOut_2")
 return retVal0, retVal1, retVal2, err
}
@@ -22759,10 +24087,11 @@ func(ts *Tensor) LinalgLdlFactorOut(lD *Tensor, pivots *Tensor, hermitian bool,
 if hermitian { chermitian = int32(1) }
 lib.AtgLinalgLdlFactorOut(ctensorPtr0, lD.ctensor, pivots.ctensor, ts.ctensor, chermitian)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgLdlFactorOut() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgLdlFactorOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgLdlFactorOut_1")
 return retVal0, retVal1, err
}
@@ -22777,9 +24106,10 @@ func LinalgLdlSolve(lD *Tensor, pivots *Tensor, b *Tensor, hermitian bool)(retVa
 if hermitian { chermitian = int32(1) }
 lib.AtgLinalgLdlSolve(ptr, lD.ctensor, pivots.ctensor, b.ctensor, chermitian)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgLdlSolve() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgLdlSolve")
 return retVal, err
}
@@ -22794,9 +24124,10 @@ func LinalgLdlSolveOut(out *Tensor, lD *Tensor, pivots *Tensor, b *Tensor, hermi
 if hermitian { chermitian = int32(1) }
 lib.AtgLinalgLdlSolveOut(ptr, out.ctensor, lD.ctensor, pivots.ctensor, b.ctensor, chermitian)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgLdlSolveOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "LinalgLdlSolveOut")
 return retVal, err
}
@@ -22818,12 +24149,13 @@ func(ts *Tensor) LinalgLstsq(b *Tensor, rcond []float64, driver string, del bool
 }
 lib.AtgLinalgLstsq(ctensorPtr0, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("LinalgLstsq() failed: %w", err)
 return retVal0, retVal1, retVal2, retVal3, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
- retVal3 = &Tensor{ctensor: *ctensorPtr3}
+ retVal0 = newTensor(*ctensorPtr0, "LinalgLstsq_0")
+ retVal1 = newTensor(*ctensorPtr1, "LinalgLstsq_1")
+ retVal2 = newTensor(*ctensorPtr2, "LinalgLstsq_2")
+ retVal3 = newTensor(*ctensorPtr3, "LinalgLstsq_3")
return retVal0, retVal1, retVal2, retVal3, err } @@ -22845,12 +24177,13 @@ func(ts *Tensor) LinalgLstsqOut(solution *Tensor, residuals *Tensor, rank *Tenso } lib.AtgLinalgLstsqOut(ctensorPtr0, solution.ctensor, residuals.ctensor, rank.ctensor, singularValues.ctensor, ts.ctensor, b.ctensor, crcondVal, crcondNull, driver) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLstsqOut() failed: %w", err) return retVal0, retVal1, retVal2, retVal3, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} - retVal3 = &Tensor{ctensor: *ctensorPtr3} + retVal0 = newTensor(*ctensorPtr0, "LinalgLstsqOut_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgLstsqOut_1") + retVal2 = newTensor(*ctensorPtr2, "LinalgLstsqOut_2") + retVal3 = newTensor(*ctensorPtr3, "LinalgLstsqOut_3") return retVal0, retVal1, retVal2, retVal3, err } @@ -22866,11 +24199,12 @@ func LinalgLu(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, retVal2 * if pivot { cpivot = int32(1) } lib.AtgLinalgLu(ctensorPtr0, a.ctensor, cpivot) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLu() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LinalgLu_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgLu_1") + retVal2 = newTensor(*ctensorPtr2, "LinalgLu_2") return retVal0, retVal1, retVal2, err } @@ -22885,10 +24219,11 @@ func LinalgLuFactor(a *Tensor, pivot bool)(retVal0 *Tensor, retVal1 *Tensor, err if pivot { cpivot = int32(1) } lib.AtgLinalgLuFactor(ctensorPtr0, a.ctensor, cpivot) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLuFactor() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgLuFactor_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgLuFactor_1") return retVal0, retVal1, err } @@ -22906,11 +24241,12 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.AtgLinalgLuFactorEx(ctensorPtr0, a.ctensor, cpivot, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLuFactorEx() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LinalgLuFactorEx_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgLuFactorEx_1") + retVal2 = newTensor(*ctensorPtr2, "LinalgLuFactorEx_2") return retVal0, retVal1, retVal2, err } @@ -22928,11 +24264,12 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.AtgLinalgLuFactorExOut(ctensorPtr0, lU.ctensor, pivots.ctensor, info.ctensor, a.ctensor, cpivot, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLuFactorExOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LinalgLuFactorExOut_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgLuFactorExOut_1") + retVal2 = newTensor(*ctensorPtr2, "LinalgLuFactorExOut_2") return retVal0, retVal1, retVal2, err } @@ -22947,10 +24284,11 @@ func LinalgLuFactorOut(lU *Tensor, pivots *Tensor, a *Tensor, pivot bool)(retVal if pivot { cpivot = int32(1) } 
lib.AtgLinalgLuFactorOut(ctensorPtr0, lU.ctensor, pivots.ctensor, a.ctensor, cpivot) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLuFactorOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgLuFactorOut_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgLuFactorOut_1") return retVal0, retVal1, err } @@ -22966,11 +24304,12 @@ func LinalgLuOut(p *Tensor, l *Tensor, u *Tensor, a *Tensor, pivot bool)(retVal0 if pivot { cpivot = int32(1) } lib.AtgLinalgLuOut(ctensorPtr0, p.ctensor, l.ctensor, u.ctensor, a.ctensor, cpivot) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLuOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LinalgLuOut_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgLuOut_1") + retVal2 = newTensor(*ctensorPtr2, "LinalgLuOut_2") return retVal0, retVal1, retVal2, err } @@ -22987,9 +24326,10 @@ cadjoint := int32(0) if adjoint { cadjoint = int32(1) } lib.AtgLinalgLuSolve(ptr, lU.ctensor, pivots.ctensor, b.ctensor, cleft, cadjoint) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLuSolve() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgLuSolve") return retVal, err } @@ -23006,9 +24346,10 @@ cadjoint := int32(0) if adjoint { cadjoint = int32(1) } lib.AtgLinalgLuSolveOut(ptr, out.ctensor, lU.ctensor, pivots.ctensor, b.ctensor, cleft, cadjoint) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgLuSolveOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgLuSolveOut") return retVal, err } @@ -23022,9 +24363,10 @@ func(ts *Tensor) LinalgMatmul(other *Tensor, del bool)(retVal *Tensor, err error lib.AtgLinalgMatmul(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatmul() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatmul") return retVal, err } @@ -23038,9 +24380,10 @@ func(ts *Tensor) LinalgMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *T lib.AtgLinalgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatmulOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatmulOut") return retVal, err } @@ -23054,9 +24397,10 @@ func(ts *Tensor) LinalgMatrixExp(del bool)(retVal *Tensor, err error) { lib.AtgLinalgMatrixExp(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixExp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixExp") return retVal, err } @@ -23070,9 +24414,10 @@ func(ts *Tensor) LinalgMatrixExpOut(out *Tensor, del bool)(retVal *Tensor, err e lib.AtgLinalgMatrixExpOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixExpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixExpOut") return retVal, err } @@ -23086,9 +24431,10 @@ func(ts *Tensor) LinalgMatrixPower(n int64, del bool)(retVal *Tensor, err error) lib.AtgLinalgMatrixPower(ptr, ts.ctensor, n) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("LinalgMatrixPower() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixPower") return retVal, err } @@ -23102,9 +24448,10 @@ func(ts *Tensor) LinalgMatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Te lib.AtgLinalgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixPowerOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixPowerOut") return retVal, err } @@ -23120,9 +24467,10 @@ func(ts *Tensor) LinalgMatrixRank(tol float64, hermitian bool, del bool)(retVal if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRank(ptr, ts.ctensor, tol, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRank() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRank") return retVal, err } @@ -23150,9 +24498,10 @@ chermitian := int32(0) if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRankAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRankAtolRtolFloat() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRankAtolRtolFloat") return retVal, err } @@ -23180,9 +24529,10 @@ chermitian := int32(0) if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRankAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRankAtolRtolFloatOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRankAtolRtolFloatOut") return retVal, err } @@ -23197,9 +24547,10 @@ func LinalgMatrixRankAtolRtolTensor(input *Tensor, atol *Tensor, rtol *Tensor, h if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRankAtolRtolTensor(ptr, input.ctensor, atol.ctensor, rtol.ctensor, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRankAtolRtolTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRankAtolRtolTensor") return retVal, err } @@ -23214,9 +24565,10 @@ func LinalgMatrixRankAtolRtolTensorOut(out *Tensor, input *Tensor, atol *Tensor, if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRankAtolRtolTensorOut(ptr, out.ctensor, input.ctensor, atol.ctensor, rtol.ctensor, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRankAtolRtolTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRankAtolRtolTensorOut") return retVal, err } @@ -23232,9 +24584,10 @@ func(ts *Tensor) LinalgMatrixRankOut(out *Tensor, tol float64, hermitian bool, d if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRankOut(ptr, out.ctensor, ts.ctensor, tol, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRankOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRankOut") return retVal, err } @@ -23249,9 +24602,10 @@ func LinalgMatrixRankOutTolTensor(out *Tensor, input *Tensor, tol *Tensor, hermi if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRankOutTolTensor(ptr, out.ctensor, input.ctensor, tol.ctensor, chermitian) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRankOutTolTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRankOutTolTensor") return retVal, err } @@ -23266,9 +24620,10 @@ func LinalgMatrixRankTolTensor(input *Tensor, tol *Tensor, hermitian bool)(retVa if hermitian { chermitian = int32(1) } lib.AtgLinalgMatrixRankTolTensor(ptr, input.ctensor, tol.ctensor, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMatrixRankTolTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMatrixRankTolTensor") return retVal, err } @@ -23283,9 +24638,10 @@ func LinalgMultiDot(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgLinalgMultiDot(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMultiDot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMultiDot") return retVal, err } @@ -23300,9 +24656,10 @@ func LinalgMultiDotOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgLinalgMultiDotOut(ptr, out.ctensor, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgMultiDotOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgMultiDotOut") return retVal, err } @@ -23319,9 +24676,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgLinalgNorm(ptr, ts.ctensor, ord.cscalar, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgNorm") return retVal, err } @@ -23338,9 +24696,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgLinalgNormOrdStr(ptr, ts.ctensor, ord, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgNormOrdStr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgNormOrdStr") return retVal, err } @@ -23357,9 +24716,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgLinalgNormOrdStrOut(ptr, out.ctensor, ts.ctensor, ord, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgNormOrdStrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgNormOrdStrOut") return retVal, err } @@ -23376,9 +24736,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgLinalgNormOut(ptr, out.ctensor, ts.ctensor, ord.cscalar, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgNormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgNormOut") return retVal, err } @@ -23394,9 +24755,10 @@ func(ts *Tensor) LinalgPinv(rcond float64, hermitian bool, del bool)(retVal *Ten if hermitian { chermitian = int32(1) } lib.AtgLinalgPinv(ptr, ts.ctensor, rcond, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinv() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinv") return retVal, err } @@ -23424,9 +24786,10 @@ chermitian := int32(0) if 
hermitian { chermitian = int32(1) } lib.AtgLinalgPinvAtolRtolFloat(ptr, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinvAtolRtolFloat() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinvAtolRtolFloat") return retVal, err } @@ -23454,9 +24817,10 @@ chermitian := int32(0) if hermitian { chermitian = int32(1) } lib.AtgLinalgPinvAtolRtolFloatOut(ptr, out.ctensor, ts.ctensor, catolVal, catolNull, crtolVal, crtolNull, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinvAtolRtolFloatOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinvAtolRtolFloatOut") return retVal, err } @@ -23472,9 +24836,10 @@ func(ts *Tensor) LinalgPinvAtolRtolTensor(atol *Tensor, rtol *Tensor, hermitian if hermitian { chermitian = int32(1) } lib.AtgLinalgPinvAtolRtolTensor(ptr, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinvAtolRtolTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinvAtolRtolTensor") return retVal, err } @@ -23490,9 +24855,10 @@ func(ts *Tensor) LinalgPinvAtolRtolTensorOut(out *Tensor, atol *Tensor, rtol *Te if hermitian { chermitian = int32(1) } lib.AtgLinalgPinvAtolRtolTensorOut(ptr, out.ctensor, ts.ctensor, atol.ctensor, rtol.ctensor, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinvAtolRtolTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinvAtolRtolTensorOut") return retVal, err } @@ -23508,9 +24874,10 @@ func(ts *Tensor) LinalgPinvOut(out *Tensor, rcond float64, hermitian bool, del b if hermitian { chermitian = int32(1) } lib.AtgLinalgPinvOut(ptr, out.ctensor, ts.ctensor, rcond, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinvOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinvOut") return retVal, err } @@ -23526,9 +24893,10 @@ func(ts *Tensor) LinalgPinvOutRcondTensor(out *Tensor, rcond *Tensor, hermitian if hermitian { chermitian = int32(1) } lib.AtgLinalgPinvOutRcondTensor(ptr, out.ctensor, ts.ctensor, rcond.ctensor, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinvOutRcondTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinvOutRcondTensor") return retVal, err } @@ -23544,9 +24912,10 @@ func(ts *Tensor) LinalgPinvRcondTensor(rcond *Tensor, hermitian bool, del bool)( if hermitian { chermitian = int32(1) } lib.AtgLinalgPinvRcondTensor(ptr, ts.ctensor, rcond.ctensor, chermitian) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgPinvRcondTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgPinvRcondTensor") return retVal, err } @@ -23559,10 +24928,11 @@ func LinalgQr(a *Tensor, mode string)(retVal0 *Tensor, retVal1 *Tensor, err erro lib.AtgLinalgQr(ctensorPtr0, a.ctensor, mode) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgQr() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgQr_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgQr_1") return retVal0, 
retVal1, err } @@ -23575,10 +24945,11 @@ func LinalgQrOut(q *Tensor, r *Tensor, a *Tensor, mode string)(retVal0 *Tensor, lib.AtgLinalgQrOut(ctensorPtr0, q.ctensor, r.ctensor, a.ctensor, mode) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgQrOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgQrOut_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgQrOut_1") return retVal0, retVal1, err } @@ -23591,10 +24962,11 @@ func LinalgSlogdet(a *Tensor)(retVal0 *Tensor, retVal1 *Tensor, err error) { lib.AtgLinalgSlogdet(ctensorPtr0, a.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSlogdet() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgSlogdet_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgSlogdet_1") return retVal0, retVal1, err } @@ -23607,10 +24979,11 @@ func LinalgSlogdetOut(sign *Tensor, logabsdet *Tensor, a *Tensor)(retVal0 *Tenso lib.AtgLinalgSlogdetOut(ctensorPtr0, sign.ctensor, logabsdet.ctensor, a.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSlogdetOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgSlogdetOut_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgSlogdetOut_1") return retVal0, retVal1, err } @@ -23625,9 +24998,10 @@ func LinalgSolve(a *Tensor, b *Tensor, left bool)(retVal *Tensor, err error) { if left { cleft = int32(1) } lib.AtgLinalgSolve(ptr, a.ctensor, b.ctensor, cleft) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSolve() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgSolve") return retVal, err } @@ -23644,10 +25018,11 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.AtgLinalgSolveEx(ctensorPtr0, a.ctensor, b.ctensor, cleft, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSolveEx() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgSolveEx_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgSolveEx_1") return retVal0, retVal1, err } @@ -23664,10 +25039,11 @@ ccheckErrors := int32(0) if checkErrors { ccheckErrors = int32(1) } lib.AtgLinalgSolveExOut(ctensorPtr0, result.ctensor, info.ctensor, a.ctensor, b.ctensor, cleft, ccheckErrors) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSolveExOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LinalgSolveExOut_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgSolveExOut_1") return retVal0, retVal1, err } @@ -23682,9 +25058,10 @@ func LinalgSolveOut(out *Tensor, a *Tensor, b *Tensor, left bool)(retVal *Tensor if left { cleft = int32(1) } lib.AtgLinalgSolveOut(ptr, out.ctensor, a.ctensor, b.ctensor, cleft) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSolveOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgSolveOut") return retVal, err } @@ -23704,9 +25081,10 @@ cunitriangular := int32(0) if unitriangular { cunitriangular = int32(1) } 
lib.AtgLinalgSolveTriangular(ptr, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSolveTriangular() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgSolveTriangular") return retVal, err } @@ -23726,9 +25104,10 @@ cunitriangular := int32(0) if unitriangular { cunitriangular = int32(1) } lib.AtgLinalgSolveTriangularOut(ptr, out.ctensor, ts.ctensor, b.ctensor, cupper, cleft, cunitriangular) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSolveTriangularOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgSolveTriangularOut") return retVal, err } @@ -23744,11 +25123,12 @@ func LinalgSvd(a *Tensor, fullMatrices bool, driver string)(retVal0 *Tensor, ret if fullMatrices { cfullMatrices = int32(1) } lib.AtgLinalgSvd(ctensorPtr0, a.ctensor, cfullMatrices, driver) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSvd() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LinalgSvd_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgSvd_1") + retVal2 = newTensor(*ctensorPtr2, "LinalgSvd_2") return retVal0, retVal1, retVal2, err } @@ -23764,11 +25144,12 @@ func LinalgSvdU(u *Tensor, s *Tensor, vh *Tensor, a *Tensor, fullMatrices bool, if fullMatrices { cfullMatrices = int32(1) } lib.AtgLinalgSvdU(ctensorPtr0, u.ctensor, s.ctensor, vh.ctensor, a.ctensor, cfullMatrices, driver) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSvdU() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LinalgSvdU_0") + retVal1 = newTensor(*ctensorPtr1, "LinalgSvdU_1") + retVal2 = newTensor(*ctensorPtr2, "LinalgSvdU_2") return retVal0, retVal1, retVal2, err } @@ -23781,9 +25162,10 @@ func LinalgSvdvals(a *Tensor, driver string)(retVal *Tensor, err error) { lib.AtgLinalgSvdvals(ptr, a.ctensor, driver) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSvdvals() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgSvdvals") return retVal, err } @@ -23796,9 +25178,10 @@ func LinalgSvdvalsOut(out *Tensor, a *Tensor, driver string)(retVal *Tensor, err lib.AtgLinalgSvdvalsOut(ptr, out.ctensor, a.ctensor, driver) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgSvdvalsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgSvdvalsOut") return retVal, err } @@ -23812,9 +25195,10 @@ func(ts *Tensor) LinalgTensorinv(ind int64, del bool)(retVal *Tensor, err error) lib.AtgLinalgTensorinv(ptr, ts.ctensor, ind) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgTensorinv() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgTensorinv") return retVal, err } @@ -23828,9 +25212,10 @@ func(ts *Tensor) LinalgTensorinvOut(out *Tensor, ind int64, del bool)(retVal *Te lib.AtgLinalgTensorinvOut(ptr, out.ctensor, ts.ctensor, ind) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgTensorinvOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"LinalgTensorinvOut") return retVal, err } @@ -23845,9 +25230,10 @@ func(ts *Tensor) LinalgTensorsolve(other *Tensor, dims []int64, del bool)(retVal dimsLen := len(dims) lib.AtgLinalgTensorsolve(ptr, ts.ctensor, other.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgTensorsolve() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgTensorsolve") return retVal, err } @@ -23862,9 +25248,10 @@ func(ts *Tensor) LinalgTensorsolveOut(out *Tensor, other *Tensor, dims []int64, dimsLen := len(dims) lib.AtgLinalgTensorsolveOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgTensorsolveOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgTensorsolveOut") return retVal, err } @@ -23883,9 +25270,10 @@ func LinalgVander(x *Tensor, n []int64)(retVal *Tensor, err error) { } lib.AtgLinalgVander(ptr, x.ctensor, cnVal, cnNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgVander() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgVander") return retVal, err } @@ -23898,9 +25286,10 @@ func LinalgVecdot(x *Tensor, y *Tensor, dim int64)(retVal *Tensor, err error) { lib.AtgLinalgVecdot(ptr, x.ctensor, y.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgVecdot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgVecdot") return retVal, err } @@ -23913,9 +25302,10 @@ func LinalgVecdotOut(out *Tensor, x *Tensor, y *Tensor, dim int64)(retVal *Tenso lib.AtgLinalgVecdotOut(ptr, out.ctensor, x.ctensor, y.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinalgVecdotOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinalgVecdotOut") return retVal, err } @@ -23928,9 +25318,10 @@ func Linear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor, err err lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Linear() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Linear") return retVal, err } @@ -23943,9 +25334,10 @@ func LinearOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor)(retVal lib.AtgLinearOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinearOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinearOut") return retVal, err } @@ -23958,9 +25350,10 @@ func Linspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Linspace() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Linspace") return retVal, err } @@ -23973,9 +25366,10 @@ func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64)(retVal *T lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps) if err = TorchErr(); err != nil { + err = fmt.Errorf("LinspaceOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LinspaceOut") return retVal, err } @@ -23989,9 +25383,10 @@ 
func(ts *Tensor) Log(del bool)(retVal *Tensor, err error) { lib.AtgLog(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Log") return retVal, err } @@ -24005,9 +25400,10 @@ func(ts *Tensor) Log10(del bool)(retVal *Tensor, err error) { lib.AtgLog10(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log10() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Log10") return retVal, err } @@ -24020,6 +25416,7 @@ func(ts *Tensor) Log10_()(err error) { lib.AtgLog10_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log10_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24036,9 +25433,10 @@ func(ts *Tensor) Log10Out(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log10Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Log10Out") return retVal, err } @@ -24052,9 +25450,10 @@ func(ts *Tensor) Log1p(del bool)(retVal *Tensor, err error) { lib.AtgLog1p(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log1p() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Log1p") return retVal, err } @@ -24067,6 +25466,7 @@ func(ts *Tensor) Log1p_()(err error) { lib.AtgLog1p_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log1p_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24083,9 +25483,10 @@ func(ts *Tensor) Log1pOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log1pOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Log1pOut") return retVal, err } @@ -24099,9 +25500,10 @@ func(ts *Tensor) Log2(del bool)(retVal *Tensor, err error) { lib.AtgLog2(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Log2") return retVal, err } @@ -24114,6 +25516,7 @@ func(ts *Tensor) Log2_()(err error) { lib.AtgLog2_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log2_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24130,9 +25533,10 @@ func(ts *Tensor) Log2Out(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Log2Out") return retVal, err } @@ -24145,6 +25549,7 @@ func(ts *Tensor) Log_()(err error) { lib.AtgLog_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Log_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24161,9 +25566,10 @@ func(ts *Tensor) LogNormal(mean float64, std float64, del bool)(retVal *Tensor, lib.AtgLogNormal(ptr, ts.ctensor, mean, std) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogNormal() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogNormal") return retVal, err } @@ -24176,6 +25582,7 @@ func(ts *Tensor) LogNormal_(mean float64, std float64)(err error) { lib.AtgLogNormal_(ptr, ts.ctensor, mean, std) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("LogNormal_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24192,9 +25599,10 @@ func(ts *Tensor) LogNormalOut(out *Tensor, mean float64, std float64, del bool)( lib.AtgLogNormalOut(ptr, out.ctensor, ts.ctensor, mean, std) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogNormalOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogNormalOut") return retVal, err } @@ -24208,9 +25616,10 @@ func(ts *Tensor) LogOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLogOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogOut") return retVal, err } @@ -24224,9 +25633,10 @@ func(ts *Tensor) LogSigmoid(del bool)(retVal *Tensor, err error) { lib.AtgLogSigmoid(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogSigmoid() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogSigmoid") return retVal, err } @@ -24240,9 +25650,10 @@ func(ts *Tensor) LogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogSigmoidBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogSigmoidBackward") return retVal, err } @@ -24256,9 +25667,10 @@ func(ts *Tensor) LogSigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tens lib.AtgLogSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogSigmoidBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogSigmoidBackwardGradInput") return retVal, err } @@ -24272,9 +25684,10 @@ func(ts *Tensor) LogSigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogSigmoidOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogSigmoidOut") return retVal, err } @@ -24288,9 +25701,10 @@ func(ts *Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tens lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogSoftmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogSoftmax") return retVal, err } @@ -24304,9 +25718,10 @@ func(ts *Tensor) LogSoftmaxIntOut(out *Tensor, dim int64, dtype gotch.DType, del lib.AtgLogSoftmaxIntOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogSoftmaxIntOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogSoftmaxIntOut") return retVal, err } @@ -24320,9 +25735,10 @@ func(ts *Tensor) Logaddexp(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLogaddexp(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logaddexp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logaddexp") return retVal, err } @@ -24336,9 +25752,10 @@ func(ts *Tensor) Logaddexp2(other *Tensor, del 
bool)(retVal *Tensor, err error) lib.AtgLogaddexp2(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logaddexp2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logaddexp2") return retVal, err } @@ -24352,9 +25769,10 @@ func(ts *Tensor) Logaddexp2Out(out *Tensor, other *Tensor, del bool)(retVal *Ten lib.AtgLogaddexp2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logaddexp2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logaddexp2Out") return retVal, err } @@ -24368,9 +25786,10 @@ func(ts *Tensor) LogaddexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tens lib.AtgLogaddexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogaddexpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogaddexpOut") return retVal, err } @@ -24384,9 +25803,10 @@ func(ts *Tensor) Logcumsumexp(dim int64, del bool)(retVal *Tensor, err error) { lib.AtgLogcumsumexp(ptr, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logcumsumexp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logcumsumexp") return retVal, err } @@ -24400,9 +25820,10 @@ func(ts *Tensor) LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tenso lib.AtgLogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogcumsumexpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogcumsumexpOut") return retVal, err } @@ -24416,9 +25837,10 @@ func(ts *Tensor) Logdet(del bool)(retVal *Tensor, err error) { lib.AtgLogdet(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logdet() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logdet") return retVal, err } @@ -24432,9 +25854,10 @@ func(ts *Tensor) LogicalAnd(other *Tensor, del bool)(retVal *Tensor, err error) lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalAnd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalAnd") return retVal, err } @@ -24447,6 +25870,7 @@ func(ts *Tensor) LogicalAnd_(other *Tensor)(err error) { lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalAnd_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24463,9 +25887,10 @@ func(ts *Tensor) LogicalAndOut(out *Tensor, other *Tensor, del bool)(retVal *Ten lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalAndOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalAndOut") return retVal, err } @@ -24479,9 +25904,10 @@ func(ts *Tensor) LogicalNot(del bool)(retVal *Tensor, err error) { lib.AtgLogicalNot(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalNot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalNot") return retVal, err } @@ -24494,6 +25920,7 @@ func(ts *Tensor) LogicalNot_()(err error) { lib.AtgLogicalNot_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("LogicalNot_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24510,9 +25937,10 @@ func(ts *Tensor) LogicalNotOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalNotOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalNotOut") return retVal, err } @@ -24526,9 +25954,10 @@ func(ts *Tensor) LogicalOr(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalOr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalOr") return retVal, err } @@ -24541,6 +25970,7 @@ func(ts *Tensor) LogicalOr_(other *Tensor)(err error) { lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalOr_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24557,9 +25987,10 @@ func(ts *Tensor) LogicalOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tens lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalOrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalOrOut") return retVal, err } @@ -24573,9 +26004,10 @@ func(ts *Tensor) LogicalXor(other *Tensor, del bool)(retVal *Tensor, err error) lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalXor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalXor") return retVal, err } @@ -24588,6 +26020,7 @@ func(ts *Tensor) LogicalXor_(other *Tensor)(err error) { lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalXor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24604,9 +26037,10 @@ func(ts *Tensor) LogicalXorOut(out *Tensor, other *Tensor, del bool)(retVal *Ten lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogicalXorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogicalXorOut") return retVal, err } @@ -24626,9 +26060,10 @@ func(ts *Tensor) Logit(eps []float64, del bool)(retVal *Tensor, err error) { } lib.AtgLogit(ptr, ts.ctensor, cepsVal, cepsNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logit() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logit") return retVal, err } @@ -24647,6 +26082,7 @@ func(ts *Tensor) Logit_(eps []float64)(err error) { } lib.AtgLogit_(ptr, ts.ctensor, cepsVal, cepsNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logit_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24669,9 +26105,10 @@ func(ts *Tensor) LogitBackward(gradOutput *Tensor, eps []float64, del bool)(retV } lib.AtgLogitBackward(ptr, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogitBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogitBackward") return retVal, err } @@ -24691,9 +26128,10 @@ func(ts *Tensor) LogitBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, e } lib.AtgLogitBackwardGradInput(ptr, gradInput.ctensor, 
gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogitBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogitBackwardGradInput") return retVal, err } @@ -24713,9 +26151,10 @@ func(ts *Tensor) LogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor, } lib.AtgLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogitOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogitOut") return retVal, err } @@ -24728,9 +26167,10 @@ func Logspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logspace() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logspace") return retVal, err } @@ -24743,9 +26183,10 @@ func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base floa lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogspaceOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogspaceOut") return retVal, err } @@ -24762,9 +26203,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgLogsumexp(ptr, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Logsumexp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Logsumexp") return retVal, err } @@ -24781,9 +26223,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("LogsumexpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LogsumexpOut") return retVal, err } @@ -24809,11 +26252,12 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.AtgLstm(ctensorPtr0, input.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("Lstm() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "Lstm_0") + retVal1 = newTensor(*ctensorPtr1, "Lstm_1") + retVal2 = newTensor(*ctensorPtr2, "Lstm_2") return retVal0, retVal1, retVal2, err } @@ -24828,10 +26272,11 @@ func LstmCell(input *Tensor, hx []*Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor for _, t := range hx {chx = append(chx, t.ctensor)} lib.AtgLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LstmCell() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "LstmCell_0") + retVal1 = newTensor(*ctensorPtr1, "LstmCell_1") return retVal0, retVal1, err } @@ -24855,11 +26300,12 @@ cbidirectional := int32(0) if bidirectional { cbidirectional = int32(1) } lib.AtgLstmData(ctensorPtr0, data.ctensor, 
batchSizes.ctensor, chx, len(chx), cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) if err = TorchErr(); err != nil { + err = fmt.Errorf("LstmData() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LstmData_0") + retVal1 = newTensor(*ctensorPtr1, "LstmData_1") + retVal2 = newTensor(*ctensorPtr2, "LstmData_2") return retVal0, retVal1, retVal2, err } @@ -24873,9 +26319,10 @@ func(ts *Tensor) Lt(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgLt(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Lt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Lt") return retVal, err } @@ -24888,6 +26335,7 @@ func(ts *Tensor) Lt_(other *Scalar)(err error) { lib.AtgLt_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Lt_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24904,9 +26352,10 @@ func(ts *Tensor) LtScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tenso lib.AtgLtScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("LtScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LtScalarOut") return retVal, err } @@ -24920,9 +26369,10 @@ func(ts *Tensor) LtTensor(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgLtTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LtTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LtTensor") return retVal, err } @@ -24935,6 +26385,7 @@ func(ts *Tensor) LtTensor_(other *Tensor)(err error) { lib.AtgLtTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LtTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -24951,9 +26402,10 @@ func(ts *Tensor) LtTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso lib.AtgLtTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LtTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LtTensorOut") return retVal, err } @@ -24967,9 +26419,10 @@ func(ts *Tensor) LuSolve(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Ten lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LuSolve() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LuSolve") return retVal, err } @@ -24983,9 +26436,10 @@ func(ts *Tensor) LuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del b lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("LuSolveOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "LuSolveOut") return retVal, err } @@ -25003,11 +26457,12 @@ cunpackPivots := int32(0) if unpackPivots { cunpackPivots = int32(1) } lib.AtgLuUnpack(ctensorPtr0, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots) if err = TorchErr(); err != nil { + err = fmt.Errorf("LuUnpack() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: 
*ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LuUnpack_0") + retVal1 = newTensor(*ctensorPtr1, "LuUnpack_1") + retVal2 = newTensor(*ctensorPtr2, "LuUnpack_2") return retVal0, retVal1, retVal2, err } @@ -25025,11 +26480,12 @@ cunpackPivots := int32(0) if unpackPivots { cunpackPivots = int32(1) } lib.AtgLuUnpackOut(ctensorPtr0, p.ctensor, l.ctensor, u.ctensor, lUData.ctensor, lUPivots.ctensor, cunpackData, cunpackPivots) if err = TorchErr(); err != nil { + err = fmt.Errorf("LuUnpackOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "LuUnpackOut_0") + retVal1 = newTensor(*ctensorPtr1, "LuUnpackOut_1") + retVal2 = newTensor(*ctensorPtr2, "LuUnpackOut_2") return retVal0, retVal1, retVal2, err } @@ -25042,9 +26498,10 @@ func MarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin fl lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("MarginRankingLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MarginRankingLoss") return retVal, err } @@ -25058,9 +26515,10 @@ func(ts *Tensor) MaskedFill(mask *Tensor, value *Scalar, del bool)(retVal *Tenso lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedFill() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedFill") return retVal, err } @@ -25073,6 +26531,7 @@ func(ts *Tensor) MaskedFill_(mask *Tensor, value *Scalar)(err error) { lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedFill_() failed: %w", err) return err } ts.ctensor = *ptr @@ -25089,9 +26548,10 @@ func(ts *Tensor) MaskedFillScalarOut(out *Tensor, mask *Tensor, value *Scalar, d lib.AtgMaskedFillScalarOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedFillScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedFillScalarOut") return retVal, err } @@ -25105,9 +26565,10 @@ func(ts *Tensor) MaskedFillTensor(mask *Tensor, value *Tensor, del bool)(retVal lib.AtgMaskedFillTensor(ptr, ts.ctensor, mask.ctensor, value.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedFillTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedFillTensor") return retVal, err } @@ -25120,6 +26581,7 @@ func(ts *Tensor) MaskedFillTensor_(mask *Tensor, value *Tensor)(err error) { lib.AtgMaskedFillTensor_(ptr, ts.ctensor, mask.ctensor, value.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedFillTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -25136,9 +26598,10 @@ func(ts *Tensor) MaskedFillTensorOut(out *Tensor, mask *Tensor, value *Tensor, d lib.AtgMaskedFillTensorOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, value.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedFillTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedFillTensorOut") return retVal, err } @@ 
-25152,9 +26615,10 @@ func(ts *Tensor) MaskedScatter(mask *Tensor, source *Tensor, del bool)(retVal *T lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedScatter() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedScatter") return retVal, err } @@ -25167,6 +26631,7 @@ func(ts *Tensor) MaskedScatter_(mask *Tensor, source *Tensor)(err error) { lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedScatter_() failed: %w", err) return err } ts.ctensor = *ptr @@ -25183,9 +26648,10 @@ func(ts *Tensor) MaskedScatterOut(out *Tensor, mask *Tensor, source *Tensor, del lib.AtgMaskedScatterOut(ptr, out.ctensor, ts.ctensor, mask.ctensor, source.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedScatterOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedScatterOut") return retVal, err } @@ -25199,9 +26665,10 @@ func(ts *Tensor) MaskedSelect(mask *Tensor, del bool)(retVal *Tensor, err error) lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedSelect() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedSelect") return retVal, err } @@ -25214,9 +26681,10 @@ func MaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor)(retVal *Ten lib.AtgMaskedSelectBackward(ptr, grad.ctensor, input.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedSelectBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedSelectBackward") return retVal, err } @@ -25230,9 +26698,10 @@ func(ts *Tensor) MaskedSelectOut(out *Tensor, mask *Tensor, del bool)(retVal *Te lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaskedSelectOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaskedSelectOut") return retVal, err } @@ -25246,9 +26715,10 @@ func(ts *Tensor) Matmul(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgMatmul(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Matmul() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Matmul") return retVal, err } @@ -25262,9 +26732,10 @@ func(ts *Tensor) MatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MatmulOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MatmulOut") return retVal, err } @@ -25278,9 +26749,10 @@ func(ts *Tensor) MatrixExp(del bool)(retVal *Tensor, err error) { lib.AtgMatrixExp(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MatrixExp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MatrixExp") return retVal, err } @@ -25294,9 +26766,10 @@ func(ts *Tensor) MatrixExpBackward(grad *Tensor, del bool)(retVal *Tensor, err e lib.AtgMatrixExpBackward(ptr, ts.ctensor, grad.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MatrixExpBackward() failed: %w", err) return retVal, err } - retVal = 
&Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MatrixExpBackward") return retVal, err } @@ -25310,9 +26783,10 @@ func(ts *Tensor) MatrixH(del bool)(retVal *Tensor, err error) { lib.AtgMatrixH(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MatrixH() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MatrixH") return retVal, err } @@ -25326,9 +26800,10 @@ func(ts *Tensor) MatrixPower(n int64, del bool)(retVal *Tensor, err error) { lib.AtgMatrixPower(ptr, ts.ctensor, n) if err = TorchErr(); err != nil { + err = fmt.Errorf("MatrixPower() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MatrixPower") return retVal, err } @@ -25342,9 +26817,10 @@ func(ts *Tensor) MatrixPowerOut(out *Tensor, n int64, del bool)(retVal *Tensor, lib.AtgMatrixPowerOut(ptr, out.ctensor, ts.ctensor, n) if err = TorchErr(); err != nil { + err = fmt.Errorf("MatrixPowerOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MatrixPowerOut") return retVal, err } @@ -25358,9 +26834,10 @@ func(ts *Tensor) Max(del bool)(retVal *Tensor, err error) { lib.AtgMax(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Max() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Max") return retVal, err } @@ -25376,10 +26853,11 @@ func(ts *Tensor) MaxDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retV if keepdim { ckeepdim = int32(1) } lib.AtgMaxDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxDim() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "MaxDim_0") + retVal1 = newTensor(*ctensorPtr1, "MaxDim_1") return retVal0, retVal1, err } @@ -25395,10 +26873,11 @@ func(ts *Tensor) MaxDimMax(max *Tensor, maxValues *Tensor, dim int64, keepdim bo if keepdim { ckeepdim = int32(1) } lib.AtgMaxDimMax(ctensorPtr0, max.ctensor, maxValues.ctensor, ts.ctensor, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxDimMax() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "MaxDimMax_0") + retVal1 = newTensor(*ctensorPtr1, "MaxDimMax_1") return retVal0, retVal1, err } @@ -25412,9 +26891,10 @@ func(ts *Tensor) MaxOther(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgMaxOther(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxOther() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxOther") return retVal, err } @@ -25428,9 +26908,10 @@ func(ts *Tensor) MaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, er lib.AtgMaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxOut") return retVal, err } @@ -25450,9 +26931,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxPool1d() failed: %w", err) return retVal, err } - retVal 
@@ -25472,10 +26954,11 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMaxPool1dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MaxPool1dWithIndices() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MaxPool1dWithIndices_0")
+ retVal1 = newTensor(*ctensorPtr1, "MaxPool1dWithIndices_1")
 return retVal0, retVal1, err
 }
@@ -25495,9 +26978,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MaxPool2d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MaxPool2d")
 return retVal, err
 }
@@ -25517,9 +27001,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MaxPool2dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MaxPool2dBackward")
 return retVal, err
 }
@@ -25539,9 +27024,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMaxPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MaxPool2dBackwardOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MaxPool2dBackwardOut")
 return retVal, err
 }
@@ -25561,10 +27047,11 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMaxPool2dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MaxPool2dWithIndices() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MaxPool2dWithIndices_0")
+ retVal1 = newTensor(*ctensorPtr1, "MaxPool2dWithIndices_1")
 return retVal0, retVal1, err
 }
@@ -25584,9 +27071,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MaxPool2dWithIndicesBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MaxPool2dWithIndicesBackward")
 return retVal, err
 }
@@ -25606,9 +27094,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMaxPool2dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MaxPool2dWithIndicesBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MaxPool2dWithIndicesBackwardGradInput")
 return retVal, err
 }
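
NOTE: the cceilMode := int32(0); if ceilMode { cceilMode = int32(1) } context lines in these pooling hunks are the generator's bool-to-int32 lowering: cgo cannot pass a Go bool to the libtch C shims. The generated file inlines it at every call site; written as a standalone helper it would read:

// Illustrative helper only; the generated code inlines this lowering.
func cbool(b bool) int32 {
	if b {
		return 1
	}
	return 0
}
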
fmt.Errorf("MaxPool2dWithIndicesBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxPool2dWithIndicesBackwardGradInput") return retVal, err } @@ -25628,10 +27117,11 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgMaxPool2dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxPool2dWithIndicesOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "MaxPool2dWithIndicesOut_0") + retVal1 = newTensor(*ctensorPtr1, "MaxPool2dWithIndicesOut_1") return retVal0, retVal1, err } @@ -25651,9 +27141,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxPool3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxPool3d") return retVal, err } @@ -25673,10 +27164,11 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgMaxPool3dWithIndices(ctensorPtr0, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxPool3dWithIndices() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "MaxPool3dWithIndices_0") + retVal1 = newTensor(*ctensorPtr1, "MaxPool3dWithIndices_1") return retVal0, retVal1, err } @@ -25696,9 +27188,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxPool3dWithIndicesBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxPool3dWithIndicesBackward") return retVal, err } @@ -25718,9 +27211,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgMaxPool3dWithIndicesBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode, indices.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxPool3dWithIndicesBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxPool3dWithIndicesBackwardGradInput") return retVal, err } @@ -25740,10 +27234,11 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgMaxPool3dWithIndicesOut(ctensorPtr0, out.ctensor, indices.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxPool3dWithIndicesOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "MaxPool3dWithIndicesOut_0") + retVal1 = newTensor(*ctensorPtr1, 
"MaxPool3dWithIndicesOut_1") return retVal0, retVal1, err } @@ -25757,9 +27252,10 @@ func(ts *Tensor) MaxUnaryOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgMaxUnaryOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxUnaryOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxUnaryOut") return retVal, err } @@ -25774,9 +27270,10 @@ func(ts *Tensor) MaxUnpool2d(indices *Tensor, outputSize []int64, del bool)(retV outputSizeLen := len(outputSize) lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxUnpool2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxUnpool2d") return retVal, err } @@ -25791,9 +27288,10 @@ func(ts *Tensor) MaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64 outputSizeLen := len(outputSize) lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxUnpool2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxUnpool2dOut") return retVal, err } @@ -25810,9 +27308,10 @@ strideLen := len(stride) paddingLen := len(padding) lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, outputSizeLen, stride, strideLen, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxUnpool3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxUnpool3d") return retVal, err } @@ -25829,9 +27328,10 @@ strideLen := len(stride) paddingLen := len(padding) lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, outputSizeLen, stride, strideLen, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaxUnpool3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaxUnpool3dOut") return retVal, err } @@ -25845,9 +27345,10 @@ func(ts *Tensor) Maximum(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgMaximum(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Maximum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Maximum") return retVal, err } @@ -25861,9 +27362,10 @@ func(ts *Tensor) MaximumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor lib.AtgMaximumOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("MaximumOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MaximumOut") return retVal, err } @@ -25877,9 +27379,10 @@ func(ts *Tensor) Mean(dtype gotch.DType, del bool)(retVal *Tensor, err error) { lib.AtgMean(ptr, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Mean() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Mean") return retVal, err } @@ -25896,9 +27399,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgMeanDim(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("MeanDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MeanDim") return retVal, err } @@ -25915,9 +27419,10 @@ ckeepdim := 
@@ -25931,9 +27436,10 @@ func(ts *Tensor) Median(del bool)(retVal *Tensor, err error) {
 lib.AtgMedian(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Median() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Median")
 return retVal, err
 }
@@ -25949,10 +27455,11 @@ func(ts *Tensor) MedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, r
 if keepdim { ckeepdim = int32(1) }
 lib.AtgMedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MedianDim() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MedianDim_0")
+ retVal1 = newTensor(*ctensorPtr1, "MedianDim_1")
 return retVal0, retVal1, err
 }
@@ -25968,10 +27475,11 @@ func(ts *Tensor) MedianDimValues(values *Tensor, indices *Tensor, dim int64, kee
 if keepdim { ckeepdim = int32(1) }
 lib.AtgMedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MedianDimValues() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MedianDimValues_0")
+ retVal1 = newTensor(*ctensorPtr1, "MedianDimValues_1")
 return retVal0, retVal1, err
 }
@@ -25985,9 +27493,10 @@ func(ts *Tensor) MedianOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMedianOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MedianOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MedianOut")
 return retVal, err
 }
@@ -26001,9 +27510,10 @@ func(ts *Tensor) Mh(del bool)(retVal *Tensor, err error) {
 lib.AtgMh(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mh() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Mh")
 return retVal, err
 }
@@ -26017,9 +27527,10 @@ func(ts *Tensor) Min(del bool)(retVal *Tensor, err error) {
 lib.AtgMin(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Min() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Min")
 return retVal, err
 }
@@ -26035,10 +27546,11 @@ func(ts *Tensor) MinDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retV
 if keepdim { ckeepdim = int32(1) }
 lib.AtgMinDim(ctensorPtr0, ts.ctensor, dim, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MinDim() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MinDim_0")
+ retVal1 = newTensor(*ctensorPtr1, "MinDim_1")
 return retVal0, retVal1, err
 }
@@ -26054,10 +27566,11 @@ func(ts *Tensor) MinDimMin(min *Tensor, minIndices *Tensor, dim int64, keepdim b
 if keepdim { ckeepdim = int32(1) }
 lib.AtgMinDimMin(ctensorPtr0, min.ctensor, minIndices.ctensor, ts.ctensor, dim, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MinDimMin() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MinDimMin_0")
+ retVal1 = newTensor(*ctensorPtr1, "MinDimMin_1")
 return retVal0, retVal1, err
 }
@@ -26071,9 +27584,10 @@ func(ts *Tensor) MinOther(other *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMinOther(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MinOther() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MinOther")
 return retVal, err
 }
@@ -26087,9 +27601,10 @@ func(ts *Tensor) MinOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, er
 lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MinOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MinOut")
 return retVal, err
 }
@@ -26103,9 +27618,10 @@ func(ts *Tensor) Minimum(other *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMinimum(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Minimum() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Minimum")
 return retVal, err
 }
@@ -26119,9 +27635,10 @@ func(ts *Tensor) MinimumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor
 lib.AtgMinimumOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MinimumOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MinimumOut")
 return retVal, err
 }
@@ -26137,11 +27654,12 @@ func MiopenBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *T
 if training { ctraining = int32(1) }
 lib.AtgMiopenBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenBatchNorm() failed: %w", err)
 return retVal0, retVal1, retVal2, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
+ retVal0 = newTensor(*ctensorPtr0, "MiopenBatchNorm_0")
+ retVal1 = newTensor(*ctensorPtr1, "MiopenBatchNorm_1")
+ retVal2 = newTensor(*ctensorPtr2, "MiopenBatchNorm_2")
 return retVal0, retVal1, retVal2, err
 }
@@ -26155,11 +27673,12 @@ func MiopenBatchNormBackward(input *Tensor, gradOutput *Tensor, weight *Tensor,
 lib.AtgMiopenBatchNormBackward(ctensorPtr0, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenBatchNormBackward() failed: %w", err)
 return retVal0, retVal1, retVal2, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
+ retVal0 = newTensor(*ctensorPtr0, "MiopenBatchNormBackward_0")
+ retVal1 = newTensor(*ctensorPtr1, "MiopenBatchNormBackward_1")
+ retVal2 = newTensor(*ctensorPtr2, "MiopenBatchNormBackward_2")
 return retVal0, retVal1, retVal2, err
 }
@@ -26173,11 +27692,12 @@ func MiopenBatchNormBackwardOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input
 lib.AtgMiopenBatchNormBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, gradOutput.ctensor, weight.ctensor, runningMean.ctensor, runningVar.ctensor, saveMean.ctensor, saveVar.ctensor, epsilon)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenBatchNormBackwardOut() failed: %w", err)
 return retVal0, retVal1, retVal2, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
+ retVal0 = newTensor(*ctensorPtr0, "MiopenBatchNormBackwardOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "MiopenBatchNormBackwardOut_1")
+ retVal2 = newTensor(*ctensorPtr2, "MiopenBatchNormBackwardOut_2")
 return retVal0, retVal1, retVal2, err
 }
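
NOTE: multi-result wrappers tag each return with an index suffix ("_0", "_1", ...), so every tensor a single call produces stays distinguishable. Illustrative use of the MinDim hunk above (assuming newTensor records the tag):

values, indices, err := x.MinDim(1, true, false) // dim=1, keepdim=true, del=false
if err != nil {
	log.Fatal(err)
}
// values carries tag "MinDim_0", indices carries "MinDim_1".
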
@@ -26193,11 +27713,12 @@ func MiopenBatchNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor,
 if training { ctraining = int32(1) }
 lib.AtgMiopenBatchNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, exponentialAverageFactor, epsilon)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenBatchNormOut() failed: %w", err)
 return retVal0, retVal1, retVal2, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
+ retVal0 = newTensor(*ctensorPtr0, "MiopenBatchNormOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "MiopenBatchNormOut_1")
+ retVal2 = newTensor(*ctensorPtr2, "MiopenBatchNormOut_2")
 return retVal0, retVal1, retVal2, err
 }
@@ -26218,9 +27739,10 @@ cdeterministic := int32(0)
 if deterministic { cdeterministic = int32(1) }
 lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenConvolution() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenConvolution")
 return retVal, err
 }
@@ -26237,9 +27759,10 @@ paddingLen := len(padding)
 dilationLen := len(dilation)
 lib.AtgMiopenConvolutionAddRelu(ptr, ts.ctensor, weight.ctensor, z.ctensor, alpha.cscalar, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenConvolutionAddRelu() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenConvolutionAddRelu")
 return retVal, err
 }
@@ -26260,9 +27783,10 @@ cdeterministic := int32(0)
 if deterministic { cdeterministic = int32(1) }
 lib.AtgMiopenConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenConvolutionOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenConvolutionOut")
 return retVal, err
 }
@@ -26279,9 +27803,10 @@ paddingLen := len(padding)
 dilationLen := len(dilation)
 lib.AtgMiopenConvolutionRelu(ptr, ts.ctensor, weight.ctensor, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen, groups)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenConvolutionRelu() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenConvolutionRelu")
 return retVal, err
 }
@@ -26303,9 +27828,10 @@ cdeterministic := int32(0)
 if deterministic { cdeterministic = int32(1) }
 lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenConvolutionTranspose() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenConvolutionTranspose")
 return retVal, err
 }
@@ -26327,9 +27853,10 @@ cdeterministic := int32(0)
 if deterministic { cdeterministic = int32(1) }
 lib.AtgMiopenConvolutionTransposeOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, outputPadding, outputPaddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenConvolutionTransposeOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenConvolutionTransposeOut")
 return retVal, err
 }
@@ -26350,9 +27877,10 @@ cdeterministic := int32(0)
 if deterministic { cdeterministic = int32(1) }
 lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenDepthwiseConvolution() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenDepthwiseConvolution")
 return retVal, err
 }
@@ -26373,9 +27901,10 @@ cdeterministic := int32(0)
 if deterministic { cdeterministic = int32(1) }
 lib.AtgMiopenDepthwiseConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, cbenchmark, cdeterministic)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenDepthwiseConvolutionOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MiopenDepthwiseConvolutionOut")
 return retVal, err
 }
@@ -26400,13 +27929,14 @@ cbidirectional := int32(0)
 batchSizesLen := len(batchSizes)
 lib.AtgMiopenRnn(ctensorPtr0, input.ctensor, cweight, len(cweight), weightStride0, hx.ctensor, cx.ctensor, mode, hiddenSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenRnn() failed: %w", err)
 return retVal0, retVal1, retVal2, retVal3, retVal4, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
- retVal3 = &Tensor{ctensor: *ctensorPtr3}
- retVal4 = &Tensor{ctensor: *ctensorPtr4}
+ retVal0 = newTensor(*ctensorPtr0, "MiopenRnn_0")
+ retVal1 = newTensor(*ctensorPtr1, "MiopenRnn_1")
+ retVal2 = newTensor(*ctensorPtr2, "MiopenRnn_2")
+ retVal3 = newTensor(*ctensorPtr3, "MiopenRnn_3")
+ retVal4 = newTensor(*ctensorPtr4, "MiopenRnn_4")
 return retVal0, retVal1, retVal2, retVal3, retVal4, err
 }
@@ -26431,13 +27961,14 @@ cbidirectional := int32(0)
 batchSizesLen := len(batchSizes)
 lib.AtgMiopenRnnOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, input.ctensor, cweight, len(cweight), weightStride0, hx.ctensor, cx.ctensor, mode, hiddenSize, numLayers, cbatchFirst, dropout, ctrain, cbidirectional, batchSizes, batchSizesLen, dropoutState.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MiopenRnnOut() failed: %w", err)
 return retVal0, retVal1, retVal2, retVal3, retVal4, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
- retVal3 = &Tensor{ctensor: *ctensorPtr3}
- retVal4 = &Tensor{ctensor: *ctensorPtr4}
+ retVal0 = newTensor(*ctensorPtr0, "MiopenRnnOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "MiopenRnnOut_1")
+ retVal2 = newTensor(*ctensorPtr2, "MiopenRnnOut_2")
+ retVal3 = newTensor(*ctensorPtr3, "MiopenRnnOut_3")
+ retVal4 = newTensor(*ctensorPtr4, "MiopenRnnOut_4")
 return retVal0, retVal1, retVal2, retVal3, retVal4, err
 }
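
NOTE: because the wrapping uses the %w verb, the original TorchErr() error stays reachable through the standard errors package for any of these fallible wrappers (Mm shown for brevity; a and b are assumed *ts.Tensor, with "errors" and "fmt" imported):

if _, err := a.Mm(b, false); err != nil {
	fmt.Println(err)                // "Mm() failed: <libtorch message>"
	fmt.Println(errors.Unwrap(err)) // the bare TorchErr() error underneath
}
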
@@ -26451,9 +27982,10 @@ func(ts *Tensor) Mish(del bool)(retVal *Tensor, err error) {
 lib.AtgMish(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mish() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Mish")
 return retVal, err
 }
@@ -26466,6 +27998,7 @@ func(ts *Tensor) Mish_()(err error) {
 lib.AtgMish_(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mish_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -26482,9 +28015,10 @@ func(ts *Tensor) MishBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err
 lib.AtgMishBackward(ptr, gradOutput.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MishBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MishBackward")
 return retVal, err
 }
@@ -26498,9 +28032,10 @@ func(ts *Tensor) MishOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMishOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MishOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MishOut")
 return retVal, err
 }
@@ -26515,9 +28050,10 @@ func(ts *Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *T
 outputSizeLen := len(outputSize)
 lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, outputSizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnAdaptiveAvgPool2d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnAdaptiveAvgPool2d")
 return retVal, err
 }
@@ -26531,9 +28067,10 @@ func(ts *Tensor) MkldnnAdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(r
 lib.AtgMkldnnAdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnAdaptiveAvgPool2dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnAdaptiveAvgPool2dBackward")
 return retVal, err
 }
@@ -26547,9 +28084,10 @@ func(ts *Tensor) MkldnnAdaptiveAvgPool2dBackwardOut(out *Tensor, gradOutput *Ten
 lib.AtgMkldnnAdaptiveAvgPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnAdaptiveAvgPool2dBackwardOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnAdaptiveAvgPool2dBackwardOut")
 return retVal, err
 }
@@ -26564,9 +28102,10 @@ func(ts *Tensor) MkldnnAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del
 outputSizeLen := len(outputSize)
 lib.AtgMkldnnAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnAdaptiveAvgPool2dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnAdaptiveAvgPool2dOut")
 return retVal, err
 }
@@ -26583,9 +28122,10 @@ strideLen := len(stride)
 dilationLen := len(dilation)
 lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnConvolution() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnConvolution")
 return retVal, err
 }
@@ -26602,9 +28142,10 @@ strideLen := len(stride)
 dilationLen := len(dilation)
 lib.AtgMkldnnConvolutionOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnConvolutionOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnConvolutionOut")
 return retVal, err
 }
@@ -26618,9 +28159,10 @@ func(ts *Tensor) MkldnnLinear(weight *Tensor, bias *Tensor, del bool)(retVal *Te
 lib.AtgMkldnnLinear(ptr, ts.ctensor, weight.ctensor, bias.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnLinear() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnLinear")
 return retVal, err
 }
@@ -26634,9 +28176,10 @@ func MkldnnLinearBackwardInput(inputSize []int64, gradOutput *Tensor, weight *Te
 inputSizeLen := len(inputSize)
 lib.AtgMkldnnLinearBackwardInput(ptr, inputSize, inputSizeLen, gradOutput.ctensor, weight.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnLinearBackwardInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnLinearBackwardInput")
 return retVal, err
 }
@@ -26650,9 +28193,10 @@ func MkldnnLinearBackwardInputOut(out *Tensor, inputSize []int64, gradOutput *Te
 inputSizeLen := len(inputSize)
 lib.AtgMkldnnLinearBackwardInputOut(ptr, out.ctensor, inputSize, inputSizeLen, gradOutput.ctensor, weight.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnLinearBackwardInputOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnLinearBackwardInputOut")
 return retVal, err
 }
@@ -26667,10 +28211,11 @@ func MkldnnLinearBackwardWeights(gradOutput *Tensor, input *Tensor, weight *Tens
 if biasDefined { cbiasDefined = int32(1) }
 lib.AtgMkldnnLinearBackwardWeights(ctensorPtr0, gradOutput.ctensor, input.ctensor, weight.ctensor, cbiasDefined)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnLinearBackwardWeights() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MkldnnLinearBackwardWeights_0")
+ retVal1 = newTensor(*ctensorPtr1, "MkldnnLinearBackwardWeights_1")
 return retVal0, retVal1, err
 }
@@ -26685,10 +28230,11 @@ func MkldnnLinearBackwardWeightsOut(out0 *Tensor, out1 *Tensor, gradOutput *Tens
 if biasDefined { cbiasDefined = int32(1) }
 lib.AtgMkldnnLinearBackwardWeightsOut(ctensorPtr0, out0.ctensor, out1.ctensor, gradOutput.ctensor, input.ctensor, weight.ctensor, cbiasDefined)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnLinearBackwardWeightsOut() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "MkldnnLinearBackwardWeightsOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "MkldnnLinearBackwardWeightsOut_1")
 return retVal0, retVal1, err
 }
@@ -26702,9 +28248,10 @@ func(ts *Tensor) MkldnnLinearOut(out *Tensor, weight *Tensor, bias *Tensor, del
 lib.AtgMkldnnLinearOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, bias.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnLinearOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnLinearOut")
 return retVal, err
 }
@@ -26724,9 +28271,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool2d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool2d")
 return retVal, err
 }
@@ -26745,9 +28293,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool2dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool2dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool2dBackward")
 return retVal, err
 }
@@ -26766,9 +28315,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool2dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool2dBackwardOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool2dBackwardOut")
 return retVal, err
 }
@@ -26788,9 +28338,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool2dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool2dOut")
 return retVal, err
 }
@@ -26810,9 +28361,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool3d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool3d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool3d")
 return retVal, err
 }
@@ -26831,9 +28383,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool3dBackward(ptr, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool3dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool3dBackward")
 return retVal, err
 }
@@ -26852,9 +28405,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool3dBackwardOut(ptr, out.ctensor, gradOutput.ctensor, output.ctensor, input.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool3dBackwardOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool3dBackwardOut")
 return retVal, err
 }
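
NOTE: every []int64 parameter crosses cgo as a (slice, length) pair; the kernelSizeLen/strideLen/paddingLen/dilationLen context lines in these hunks exist only to supply the second half of each pair. Illustrative fragment:

kernelSize := []int64{3, 3, 3}
kernelSizeLen := len(kernelSize)
// ... which the wrapper then forwards as consecutive arguments:
// lib.AtgMkldnnMaxPool3d(ptr, ts.ctensor, kernelSize, kernelSizeLen, ...)
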
@@ -26874,9 +28428,10 @@ cceilMode := int32(0)
 if ceilMode { cceilMode = int32(1) }
 lib.AtgMkldnnMaxPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnMaxPool3dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnMaxPool3dOut")
 return retVal, err
 }
@@ -26894,9 +28449,10 @@ dilationLen := len(dilation)
 inputSizeLen := len(inputSize)
 lib.AtgMkldnnReorderConv2dWeight(ptr, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, inputSize, inputSizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnReorderConv2dWeight() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnReorderConv2dWeight")
 return retVal, err
 }
@@ -26914,9 +28470,10 @@ dilationLen := len(dilation)
 inputSizeLen := len(inputSize)
 lib.AtgMkldnnReorderConv2dWeightOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups, inputSize, inputSizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnReorderConv2dWeightOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnReorderConv2dWeightOut")
 return retVal, err
 }
@@ -26933,9 +28490,10 @@ strideLen := len(stride)
 dilationLen := len(dilation)
 lib.AtgMkldnnReorderConv3dWeight(ptr, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnReorderConv3dWeight() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnReorderConv3dWeight")
 return retVal, err
 }
@@ -26952,9 +28510,10 @@ strideLen := len(stride)
 dilationLen := len(dilation)
 lib.AtgMkldnnReorderConv3dWeightOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen, stride, strideLen, dilation, dilationLen, groups)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnReorderConv3dWeightOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MkldnnReorderConv3dWeightOut")
 return retVal, err
 }
@@ -26980,12 +28539,13 @@ ctrain := int32(0)
 if train { ctrain = int32(1) }
 lib.AtgMkldnnRnnLayer(ctensorPtr0, input.ctensor, weight0.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, hx_.ctensor, cx_.ctensor, creverse, batchSizes, batchSizesLen, mode, hiddenSize, numLayers, chasBiases, cbidirectional, cbatchFirst, ctrain)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnRnnLayer() failed: %w", err)
 return retVal0, retVal1, retVal2, retVal3, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
- retVal3 = &Tensor{ctensor: *ctensorPtr3}
+ retVal0 = newTensor(*ctensorPtr0, "MkldnnRnnLayer_0")
+ retVal1 = newTensor(*ctensorPtr1, "MkldnnRnnLayer_1")
+ retVal2 = newTensor(*ctensorPtr2, "MkldnnRnnLayer_2")
+ retVal3 = newTensor(*ctensorPtr3, "MkldnnRnnLayer_3")
 return retVal0, retVal1, retVal2, retVal3, err
 }
@@ -27014,15 +28574,16 @@ cbatchFirst := int32(0)
 if batchFirst { cbatchFirst = int32(1) }
 lib.AtgMkldnnRnnLayerBackward(ctensorPtr0, input.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, weight4.ctensor, hx_.ctensor, cxTmp.ctensor, output.ctensor, hy_.ctensor, cy_.ctensor, gradOutput.ctensor, gradHy.ctensor, gradCy.ctensor, creverse, mode, hiddenSize, numLayers, chasBiases, ctrain, cbidirectional, batchSizes, batchSizesLen, cbatchFirst, workspace.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnRnnLayerBackward() failed: %w", err)
 return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
- retVal3 = &Tensor{ctensor: *ctensorPtr3}
- retVal4 = &Tensor{ctensor: *ctensorPtr4}
- retVal5 = &Tensor{ctensor: *ctensorPtr5}
- retVal6 = &Tensor{ctensor: *ctensorPtr6}
+ retVal0 = newTensor(*ctensorPtr0, "MkldnnRnnLayerBackward_0")
+ retVal1 = newTensor(*ctensorPtr1, "MkldnnRnnLayerBackward_1")
+ retVal2 = newTensor(*ctensorPtr2, "MkldnnRnnLayerBackward_2")
+ retVal3 = newTensor(*ctensorPtr3, "MkldnnRnnLayerBackward_3")
+ retVal4 = newTensor(*ctensorPtr4, "MkldnnRnnLayerBackward_4")
+ retVal5 = newTensor(*ctensorPtr5, "MkldnnRnnLayerBackward_5")
+ retVal6 = newTensor(*ctensorPtr6, "MkldnnRnnLayerBackward_6")
 return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
 }
@@ -27051,15 +28612,16 @@ cbatchFirst := int32(0)
 if batchFirst { cbatchFirst = int32(1) }
 lib.AtgMkldnnRnnLayerBackwardOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, out4.ctensor, out5.ctensor, out6.ctensor, input.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, weight4.ctensor, hx_.ctensor, cxTmp.ctensor, output.ctensor, hy_.ctensor, cy_.ctensor, gradOutput.ctensor, gradHy.ctensor, gradCy.ctensor, creverse, mode, hiddenSize, numLayers, chasBiases, ctrain, cbidirectional, batchSizes, batchSizesLen, cbatchFirst, workspace.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnRnnLayerBackwardOut() failed: %w", err)
 return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
- retVal3 = &Tensor{ctensor: *ctensorPtr3}
- retVal4 = &Tensor{ctensor: *ctensorPtr4}
- retVal5 = &Tensor{ctensor: *ctensorPtr5}
- retVal6 = &Tensor{ctensor: *ctensorPtr6}
+ retVal0 = newTensor(*ctensorPtr0, "MkldnnRnnLayerBackwardOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "MkldnnRnnLayerBackwardOut_1")
+ retVal2 = newTensor(*ctensorPtr2, "MkldnnRnnLayerBackwardOut_2")
+ retVal3 = newTensor(*ctensorPtr3, "MkldnnRnnLayerBackwardOut_3")
+ retVal4 = newTensor(*ctensorPtr4, "MkldnnRnnLayerBackwardOut_4")
+ retVal5 = newTensor(*ctensorPtr5, "MkldnnRnnLayerBackwardOut_5")
+ retVal6 = newTensor(*ctensorPtr6, "MkldnnRnnLayerBackwardOut_6")
 return retVal0, retVal1, retVal2, retVal3, retVal4, retVal5, retVal6, err
 }
@@ -27085,12 +28647,13 @@ ctrain := int32(0)
 if train { ctrain = int32(1) }
 lib.AtgMkldnnRnnLayerOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, out3.ctensor, input.ctensor, weight0.ctensor, weight1.ctensor, weight2.ctensor, weight3.ctensor, hx_.ctensor, cx_.ctensor, creverse, batchSizes, batchSizesLen, mode, hiddenSize, numLayers, chasBiases, cbidirectional, cbatchFirst, ctrain)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MkldnnRnnLayerOut() failed: %w", err)
 return retVal0, retVal1, retVal2, retVal3, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
- retVal2 = &Tensor{ctensor: *ctensorPtr2}
- retVal3 = &Tensor{ctensor: *ctensorPtr3}
+ retVal0 = newTensor(*ctensorPtr0, "MkldnnRnnLayerOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "MkldnnRnnLayerOut_1")
+ retVal2 = newTensor(*ctensorPtr2, "MkldnnRnnLayerOut_2")
+ retVal3 = newTensor(*ctensorPtr3, "MkldnnRnnLayerOut_3")
 return retVal0, retVal1, retVal2, retVal3, err
 }
@@ -27104,9 +28667,10 @@ func(ts *Tensor) Mm(mat2 *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMm(ptr, ts.ctensor, mat2.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mm() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Mm")
 return retVal, err
 }
@@ -27120,9 +28684,10 @@ func(ts *Tensor) MmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor, err
 lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MmOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MmOut")
 return retVal, err
 }
@@ -27138,10 +28703,11 @@ func(ts *Tensor) Mode(dim int64, keepdim bool, del bool)(retVal0 *Tensor, retVal
 if keepdim { ckeepdim = int32(1) }
 lib.AtgMode(ctensorPtr0, ts.ctensor, dim, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mode() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "Mode_0")
+ retVal1 = newTensor(*ctensorPtr1, "Mode_1")
 return retVal0, retVal1, err
 }
@@ -27157,10 +28723,11 @@ func(ts *Tensor) ModeValues(values *Tensor, indices *Tensor, dim int64, keepdim
 if keepdim { ckeepdim = int32(1) }
 lib.AtgModeValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ModeValues() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "ModeValues_0")
+ retVal1 = newTensor(*ctensorPtr1, "ModeValues_1")
 return retVal0, retVal1, err
 }
@@ -27176,9 +28743,10 @@ func(ts *Tensor) Moveaxis(source []int64, destination []int64, del bool)(retVal
 destinationLen := len(destination)
 lib.AtgMoveaxis(ptr, ts.ctensor, source, sourceLen, destination, destinationLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Moveaxis() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Moveaxis")
 return retVal, err
 }
@@ -27192,9 +28760,10 @@ func(ts *Tensor) MoveaxisInt(source int64, destination int64, del bool)(retVal *
 lib.AtgMoveaxisInt(ptr, ts.ctensor, source, destination)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MoveaxisInt() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MoveaxisInt")
 return retVal, err
 }
@@ -27210,9 +28779,10 @@ func(ts *Tensor) Movedim(source []int64, destination []int64, del bool)(retVal *
 destinationLen := len(destination)
 lib.AtgMovedim(ptr, ts.ctensor, source, sourceLen, destination, destinationLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Movedim() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Movedim")
 return retVal, err
 }
@@ -27226,9 +28796,10 @@ func(ts *Tensor) MovedimInt(source int64, destination int64, del bool)(retVal *T
 lib.AtgMovedimInt(ptr, ts.ctensor, source, destination)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MovedimInt() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MovedimInt")
 return retVal, err
 }
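
NOTE: the del bool parameter on these methods asks the wrapper to drop the receiver after the call; the generated bodies open with if del { defer ts.MustDrop() }, which sits outside these hunks' context lines. Typical chained use of the Mm hunk above (a and b assumed *ts.Tensor):

c, err := a.Mm(b, true) // del=true: a is dropped once Mm returns; c is tagged "Mm"
if err != nil {
	log.Fatal(err)
}
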
@@ -27242,9 +28813,10 @@ func(ts *Tensor) MseLoss(target *Tensor, reduction int64, del bool)(retVal *Tens
 lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MseLoss() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MseLoss")
 return retVal, err
 }
@@ -27258,9 +28830,10 @@ func(ts *Tensor) MseLossBackward(gradOutput *Tensor, target *Tensor, reduction i
 lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MseLossBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MseLossBackward")
 return retVal, err
 }
@@ -27274,9 +28847,10 @@ func(ts *Tensor) MseLossBackwardGradInput(gradInput *Tensor, gradOutput *Tensor,
 lib.AtgMseLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MseLossBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MseLossBackwardGradInput")
 return retVal, err
 }
@@ -27290,9 +28864,10 @@ func(ts *Tensor) MseLossOut(out *Tensor, target *Tensor, reduction int64, del bo
 lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MseLossOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MseLossOut")
 return retVal, err
 }
@@ -27306,9 +28881,10 @@ func(ts *Tensor) Msort(del bool)(retVal *Tensor, err error) {
 lib.AtgMsort(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Msort() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Msort")
 return retVal, err
 }
@@ -27322,9 +28898,10 @@ func(ts *Tensor) MsortOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMsortOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MsortOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MsortOut")
 return retVal, err
 }
@@ -27338,9 +28915,10 @@ func(ts *Tensor) Mt(del bool)(retVal *Tensor, err error) {
 lib.AtgMt(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mt() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Mt")
 return retVal, err
 }
@@ -27354,9 +28932,10 @@ func(ts *Tensor) Mul(other *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMul(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mul() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Mul")
 return retVal, err
 }
@@ -27369,6 +28948,7 @@ func(ts *Tensor) Mul_(other *Tensor)(err error) {
 lib.AtgMul_(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mul_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -27385,9 +28965,10 @@ func(ts *Tensor) MulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, er
 lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MulOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MulOut")
 return retVal, err
 }
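
NOTE: in-place methods (trailing underscore, like Mul_ above) mutate ts.ctensor and return only err, so they gain the error prefix but no newTensor call — there is no new Go wrapper to register. Illustrative use:

if err := x.Mul_(y); err != nil {
	log.Fatal(err) // "Mul_() failed: <libtorch message>"
}
// x now holds the elementwise product in place.
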
@@ -27401,9 +28982,10 @@ func(ts *Tensor) MulScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
 lib.AtgMulScalar(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MulScalar() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MulScalar")
 return retVal, err
 }
@@ -27416,6 +28998,7 @@ func(ts *Tensor) MulScalar_(other *Scalar)(err error) {
 lib.AtgMulScalar_(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MulScalar_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -27432,9 +29015,10 @@ func(ts *Tensor) MulScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tens
 lib.AtgMulScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MulScalarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MulScalarOut")
 return retVal, err
 }
@@ -27448,9 +29032,10 @@ func(ts *Tensor) MultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *
 lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultiMarginLossBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultiMarginLossBackward")
 return retVal, err
 }
@@ -27464,9 +29049,10 @@ func(ts *Tensor) MultiMarginLossBackwardGradInput(gradInput *Tensor, gradOutput
 lib.AtgMultiMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultiMarginLossBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultiMarginLossBackwardGradInput")
 return retVal, err
 }
@@ -27480,9 +29066,10 @@ func(ts *Tensor) MultilabelMarginLoss(target *Tensor, reduction int64, del bool)
 lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultilabelMarginLoss() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultilabelMarginLoss")
 return retVal, err
 }
@@ -27496,9 +29083,10 @@ func(ts *Tensor) MultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor
 lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultilabelMarginLossBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultilabelMarginLossBackward")
 return retVal, err
 }
@@ -27512,9 +29100,10 @@ func(ts *Tensor) MultilabelMarginLossBackwardGradInput(gradInput *Tensor, gradOu
 lib.AtgMultilabelMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultilabelMarginLossBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultilabelMarginLossBackwardGradInput")
 return retVal, err
 }
@@ -27528,9 +29117,10 @@ func(ts *Tensor) MultilabelMarginLossOut(out *Tensor, target *Tensor, reduction
 lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultilabelMarginLossOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultilabelMarginLossOut")
 return retVal, err
 }
@@ -27546,9 +29136,10 @@ func(ts *Tensor) Multinomial(numSamples int64, replacement bool, del bool)(retVa
 if replacement { creplacement = int32(1) }
 lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Multinomial() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Multinomial")
 return retVal, err
 }
@@ -27564,9 +29155,10 @@ func(ts *Tensor) MultinomialOut(out *Tensor, numSamples int64, replacement bool,
 if replacement { creplacement = int32(1) }
 lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultinomialOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultinomialOut")
 return retVal, err
 }
@@ -27580,9 +29172,10 @@ func(ts *Tensor) Multiply(other *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMultiply(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Multiply() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Multiply")
 return retVal, err
 }
@@ -27595,6 +29188,7 @@ func(ts *Tensor) Multiply_(other *Tensor)(err error) {
 lib.AtgMultiply_(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Multiply_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -27611,9 +29205,10 @@ func(ts *Tensor) MultiplyOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso
 lib.AtgMultiplyOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultiplyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultiplyOut")
 return retVal, err
 }
@@ -27627,9 +29222,10 @@ func(ts *Tensor) MultiplyScalar(other *Scalar, del bool)(retVal *Tensor, err err
 lib.AtgMultiplyScalar(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultiplyScalar() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MultiplyScalar")
 return retVal, err
 }
@@ -27642,6 +29238,7 @@ func(ts *Tensor) MultiplyScalar_(other *Scalar)(err error) {
 lib.AtgMultiplyScalar_(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MultiplyScalar_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -27658,9 +29255,10 @@ func(ts *Tensor) Mv(vec *Tensor, del bool)(retVal *Tensor, err error) {
 lib.AtgMv(ptr, ts.ctensor, vec.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mv() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Mv")
 return retVal, err
 }
@@ -27674,9 +29272,10 @@ func(ts *Tensor) MvOut(out *Tensor, vec *Tensor, del bool)(retVal *Tensor, err e
 lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("MvOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "MvOut")
 return retVal, err
 }
@@ -27690,9 +29289,10 @@ func(ts *Tensor) Mvlgamma(p int64, del bool)(retVal *Tensor, err error) {
 lib.AtgMvlgamma(ptr, ts.ctensor, p)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Mvlgamma() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Mvlgamma")
 return retVal, err
 }
%w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Mvlgamma") return retVal, err } @@ -27705,6 +29305,7 @@ func(ts *Tensor) Mvlgamma_(p int64)(err error) { lib.AtgMvlgamma_(ptr, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("Mvlgamma_() failed: %w", err) return err } ts.ctensor = *ptr @@ -27721,9 +29322,10 @@ func(ts *Tensor) MvlgammaOut(out *Tensor, p int64, del bool)(retVal *Tensor, err lib.AtgMvlgammaOut(ptr, out.ctensor, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("MvlgammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "MvlgammaOut") return retVal, err } @@ -27755,9 +29357,10 @@ var cneginfVal float64 = 0.0 } lib.AtgNanToNum(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanToNum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NanToNum") return retVal, err } @@ -27788,6 +29391,7 @@ var cneginfVal float64 = 0.0 } lib.AtgNanToNum_(ptr, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanToNum_() failed: %w", err) return err } ts.ctensor = *ptr @@ -27822,9 +29426,10 @@ var cneginfVal float64 = 0.0 } lib.AtgNanToNumOut(ptr, out.ctensor, ts.ctensor, cnanVal, cnanNull, cposinfVal, cposinfNull, cneginfVal, cneginfNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanToNumOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NanToNumOut") return retVal, err } @@ -27841,9 +29446,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNanmean(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Nanmean() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Nanmean") return retVal, err } @@ -27860,9 +29466,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNanmeanOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanmeanOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NanmeanOut") return retVal, err } @@ -27876,9 +29483,10 @@ func(ts *Tensor) Nanmedian(del bool)(retVal *Tensor, err error) { lib.AtgNanmedian(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Nanmedian() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Nanmedian") return retVal, err } @@ -27894,10 +29502,11 @@ func(ts *Tensor) NanmedianDim(dim int64, keepdim bool, del bool)(retVal0 *Tensor if keepdim { ckeepdim = int32(1) } lib.AtgNanmedianDim(ctensorPtr0, ts.ctensor, dim, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanmedianDim() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "NanmedianDim_0") + retVal1 = newTensor(*ctensorPtr1, "NanmedianDim_1") return retVal0, retVal1, err } @@ -27913,10 +29522,11 @@ func(ts *Tensor) NanmedianDimValues(values *Tensor, indices *Tensor, dim int64, if keepdim { ckeepdim = int32(1) } lib.AtgNanmedianDimValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, dim, ckeepdim) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("NanmedianDimValues() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "NanmedianDimValues_0") + retVal1 = newTensor(*ctensorPtr1, "NanmedianDimValues_1") return retVal0, retVal1, err } @@ -27930,9 +29540,10 @@ func(ts *Tensor) NanmedianOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgNanmedianOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanmedianOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NanmedianOut") return retVal, err } @@ -27954,9 +29565,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNanquantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("Nanquantile() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Nanquantile") return retVal, err } @@ -27978,9 +29590,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNanquantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanquantileOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NanquantileOut") return retVal, err } @@ -28002,9 +29615,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNanquantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanquantileScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NanquantileScalar") return retVal, err } @@ -28026,9 +29640,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNanquantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("NanquantileScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NanquantileScalarOut") return retVal, err } @@ -28045,9 +29660,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNansum(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Nansum() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Nansum") return retVal, err } @@ -28064,9 +29680,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNansumOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NansumOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NansumOut") return retVal, err } @@ -28080,9 +29697,10 @@ func(ts *Tensor) Narrow(dim int64, start int64, length int64, del bool)(retVal * lib.AtgNarrow(ptr, ts.ctensor, dim, start, length) if err = TorchErr(); err != nil { + err = fmt.Errorf("Narrow() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Narrow") return retVal, err } @@ -28096,9 +29714,10 @@ func(ts *Tensor) NarrowCopy(dim int64, start int64, length int64, del bool)(retV lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, length) if err = TorchErr(); err != nil { + err = 
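Every wrapper in these hunks now uses the %w verb, so the original TorchErr() error stays inspectable behind the new method-name prefix. For reference, a caller-side sketch, assuming a valid *ts.Tensor x obtained elsewhere (Nanmedian is one of the methods patched above):

package example

import (
	"errors"
	"fmt"

	"github.com/sugarme/gotch/ts"
)

func median(x *ts.Tensor) {
	out, err := x.Nanmedian(false) // del=false: the caller keeps ownership of x
	if err != nil {
		fmt.Println(err)                // "Nanmedian() failed: <libtorch message>"
		fmt.Println(errors.Unwrap(err)) // the original error, recovered through %w
		return
	}
	out.MustDrop() // explicit drop remains available alongside the new GC path
}
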
fmt.Errorf("NarrowCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NarrowCopy") return retVal, err } @@ -28112,9 +29731,10 @@ func(ts *Tensor) NarrowCopyOut(out *Tensor, dim int64, start int64, length int64 lib.AtgNarrowCopyOut(ptr, out.ctensor, ts.ctensor, dim, start, length) if err = TorchErr(); err != nil { + err = fmt.Errorf("NarrowCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NarrowCopyOut") return retVal, err } @@ -28128,9 +29748,10 @@ func(ts *Tensor) NarrowTensor(dim int64, start *Tensor, length int64, del bool)( lib.AtgNarrowTensor(ptr, ts.ctensor, dim, start.ctensor, length) if err = TorchErr(); err != nil { + err = fmt.Errorf("NarrowTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NarrowTensor") return retVal, err } @@ -28146,11 +29767,12 @@ func NativeBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *T if training { ctraining = int32(1) } lib.AtgNativeBatchNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeBatchNorm() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "NativeBatchNorm_0") + retVal1 = newTensor(*ctensorPtr1, "NativeBatchNorm_1") + retVal2 = newTensor(*ctensorPtr2, "NativeBatchNorm_2") return retVal0, retVal1, retVal2, err } @@ -28166,11 +29788,12 @@ func NativeBatchNormOut(out *Tensor, saveMean *Tensor, saveInvstd *Tensor, input if training { ctraining = int32(1) } lib.AtgNativeBatchNormOut(ctensorPtr0, out.ctensor, saveMean.ctensor, saveInvstd.ctensor, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeBatchNormOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "NativeBatchNormOut_0") + retVal1 = newTensor(*ctensorPtr1, "NativeBatchNormOut_1") + retVal2 = newTensor(*ctensorPtr2, "NativeBatchNormOut_2") return retVal0, retVal1, retVal2, err } @@ -28184,9 +29807,10 @@ func(ts *Tensor) NativeChannelShuffle(groups int64, del bool)(retVal *Tensor, er lib.AtgNativeChannelShuffle(ptr, ts.ctensor, groups) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeChannelShuffle() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NativeChannelShuffle") return retVal, err } @@ -28201,10 +29825,11 @@ func NativeDropout(input *Tensor, p float64, train bool)(retVal0 *Tensor, retVal if train { ctrain = int32(1) } lib.AtgNativeDropout(ctensorPtr0, input.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeDropout() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "NativeDropout_0") + retVal1 = newTensor(*ctensorPtr1, "NativeDropout_1") return retVal0, retVal1, err } @@ -28217,9 +29842,10 @@ func NativeDropoutBackward(gradOutput *Tensor, mask *Tensor, scale float64)(retV 
lib.AtgNativeDropoutBackward(ptr, gradOutput.ctensor, mask.ctensor, scale) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeDropoutBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NativeDropoutBackward") return retVal, err } @@ -28232,9 +29858,10 @@ func NativeDropoutBackwardOut(out *Tensor, gradOutput *Tensor, mask *Tensor, sca lib.AtgNativeDropoutBackwardOut(ptr, out.ctensor, gradOutput.ctensor, mask.ctensor, scale) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeDropoutBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NativeDropoutBackwardOut") return retVal, err } @@ -28249,10 +29876,11 @@ func NativeDropoutOut(out0 *Tensor, out1 *Tensor, input *Tensor, p float64, trai if train { ctrain = int32(1) } lib.AtgNativeDropoutOut(ctensorPtr0, out0.ctensor, out1.ctensor, input.ctensor, p, ctrain) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeDropoutOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "NativeDropoutOut_0") + retVal1 = newTensor(*ctensorPtr1, "NativeDropoutOut_1") return retVal0, retVal1, err } @@ -28266,11 +29894,12 @@ func NativeGroupNorm(input *Tensor, weight *Tensor, bias *Tensor, n int64, c int lib.AtgNativeGroupNorm(ctensorPtr0, input.ctensor, weight.ctensor, bias.ctensor, n, c, hxW, group, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeGroupNorm() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "NativeGroupNorm_0") + retVal1 = newTensor(*ctensorPtr1, "NativeGroupNorm_1") + retVal2 = newTensor(*ctensorPtr2, "NativeGroupNorm_2") return retVal0, retVal1, retVal2, err } @@ -28284,11 +29913,12 @@ func NativeGroupNormOut(out0 *Tensor, out1 *Tensor, out2 *Tensor, input *Tensor, lib.AtgNativeGroupNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, weight.ctensor, bias.ctensor, n, c, hxW, group, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeGroupNormOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "NativeGroupNormOut_0") + retVal1 = newTensor(*ctensorPtr1, "NativeGroupNormOut_1") + retVal2 = newTensor(*ctensorPtr2, "NativeGroupNormOut_2") return retVal0, retVal1, retVal2, err } @@ -28303,11 +29933,12 @@ func NativeLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bia normalizedShapeLen := len(normalizedShape) lib.AtgNativeLayerNorm(ctensorPtr0, input.ctensor, normalizedShape, normalizedShapeLen, weight.ctensor, bias.ctensor, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeLayerNorm() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "NativeLayerNorm_0") + retVal1 = newTensor(*ctensorPtr1, "NativeLayerNorm_1") + retVal2 = newTensor(*ctensorPtr2, "NativeLayerNorm_2") return retVal0, retVal1, retVal2, err } @@ -28322,11 +29953,12 @@ func NativeLayerNormOut(out0 *Tensor, out1 *Tensor, out2 
*Tensor, input *Tensor, normalizedShapeLen := len(normalizedShape) lib.AtgNativeLayerNormOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, input.ctensor, normalizedShape, normalizedShapeLen, weight.ctensor, bias.ctensor, eps) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeLayerNormOut() failed: %w", err) return retVal0, retVal1, retVal2, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} - retVal2 = &Tensor{ctensor: *ctensorPtr2} + retVal0 = newTensor(*ctensorPtr0, "NativeLayerNormOut_0") + retVal1 = newTensor(*ctensorPtr1, "NativeLayerNormOut_1") + retVal2 = newTensor(*ctensorPtr2, "NativeLayerNormOut_2") return retVal0, retVal1, retVal2, err } @@ -28340,9 +29972,10 @@ func(ts *Tensor) NativeNorm(del bool)(retVal *Tensor, err error) { lib.AtgNativeNorm(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NativeNorm") return retVal, err } @@ -28356,9 +29989,10 @@ func(ts *Tensor) NativeNormOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgNativeNormOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeNormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NativeNormOut") return retVal, err } @@ -28375,9 +30009,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNativeNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeNormScalaroptDimDtype() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NativeNormScalaroptDimDtype") return retVal, err } @@ -28394,9 +30029,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNativeNormScalaroptDimDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NativeNormScalaroptDimDtypeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NativeNormScalaroptDimDtypeOut") return retVal, err } @@ -28410,9 +30046,10 @@ func(ts *Tensor) Ne(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgNe(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ne() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Ne") return retVal, err } @@ -28425,6 +30062,7 @@ func(ts *Tensor) Ne_(other *Scalar)(err error) { lib.AtgNe_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ne_() failed: %w", err) return err } ts.ctensor = *ptr @@ -28441,9 +30079,10 @@ func(ts *Tensor) NeScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tenso lib.AtgNeScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("NeScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NeScalarOut") return retVal, err } @@ -28457,9 +30096,10 @@ func(ts *Tensor) NeTensor(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgNeTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NeTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NeTensor") return retVal, err } @@ 
-28472,6 +30112,7 @@ func(ts *Tensor) NeTensor_(other *Tensor)(err error) { lib.AtgNeTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NeTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -28488,9 +30129,10 @@ func(ts *Tensor) NeTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso lib.AtgNeTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NeTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NeTensorOut") return retVal, err } @@ -28504,9 +30146,10 @@ func(ts *Tensor) Neg(del bool)(retVal *Tensor, err error) { lib.AtgNeg(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Neg() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Neg") return retVal, err } @@ -28519,6 +30162,7 @@ func(ts *Tensor) Neg_()(err error) { lib.AtgNeg_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Neg_() failed: %w", err) return err } ts.ctensor = *ptr @@ -28535,9 +30179,10 @@ func(ts *Tensor) NegOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgNegOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NegOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NegOut") return retVal, err } @@ -28551,9 +30196,10 @@ func(ts *Tensor) Negative(del bool)(retVal *Tensor, err error) { lib.AtgNegative(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Negative() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Negative") return retVal, err } @@ -28566,6 +30212,7 @@ func(ts *Tensor) Negative_()(err error) { lib.AtgNegative_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Negative_() failed: %w", err) return err } ts.ctensor = *ptr @@ -28582,9 +30229,10 @@ func(ts *Tensor) NegativeOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgNegativeOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NegativeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NegativeOut") return retVal, err } @@ -28599,9 +30247,10 @@ func(ts *Tensor) NestedToPaddedTensor(padding float64, outputSize []int64, del b outputSizeLen := len(outputSize) lib.AtgNestedToPaddedTensor(ptr, ts.ctensor, padding, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("NestedToPaddedTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NestedToPaddedTensor") return retVal, err } @@ -28616,9 +30265,10 @@ func(ts *Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice g sizeLen := len(size) lib.AtgNewEmpty(ptr, ts.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewEmpty() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewEmpty") return retVal, err } @@ -28633,9 +30283,10 @@ func(ts *Tensor) NewEmptyOut(out *Tensor, size []int64, del bool)(retVal *Tensor sizeLen := len(size) lib.AtgNewEmptyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewEmptyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = 
newTensor(*ptr, "NewEmptyOut") return retVal, err } @@ -28651,9 +30302,10 @@ func(ts *Tensor) NewEmptyStrided(size []int64, stride []int64, optionsKind gotch strideLen := len(stride) lib.AtgNewEmptyStrided(ptr, ts.ctensor, size, sizeLen, stride, strideLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewEmptyStrided() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewEmptyStrided") return retVal, err } @@ -28669,9 +30321,10 @@ func(ts *Tensor) NewEmptyStridedOut(out *Tensor, size []int64, stride []int64, d strideLen := len(stride) lib.AtgNewEmptyStridedOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewEmptyStridedOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewEmptyStridedOut") return retVal, err } @@ -28686,9 +30339,10 @@ func(ts *Tensor) NewFull(size []int64, fillValue *Scalar, optionsKind gotch.DTyp sizeLen := len(size) lib.AtgNewFull(ptr, ts.ctensor, size, sizeLen, fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewFull() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewFull") return retVal, err } @@ -28703,9 +30357,10 @@ func(ts *Tensor) NewFullOut(out *Tensor, size []int64, fillValue *Scalar, del bo sizeLen := len(size) lib.AtgNewFullOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, fillValue.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewFullOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewFullOut") return retVal, err } @@ -28720,9 +30375,10 @@ func(ts *Tensor) NewOnes(size []int64, optionsKind gotch.DType, optionsDevice go sizeLen := len(size) lib.AtgNewOnes(ptr, ts.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewOnes() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewOnes") return retVal, err } @@ -28737,9 +30393,10 @@ func(ts *Tensor) NewOnesOut(out *Tensor, size []int64, del bool)(retVal *Tensor, sizeLen := len(size) lib.AtgNewOnesOut(ptr, out.ctensor, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewOnesOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewOnesOut") return retVal, err } @@ -28754,9 +30411,10 @@ func(ts *Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice g sizeLen := len(size) lib.AtgNewZeros(ptr, ts.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewZeros() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewZeros") return retVal, err } @@ -28771,9 +30429,10 @@ func(ts *Tensor) NewZerosOut(out *Tensor, size []int64, del bool)(retVal *Tensor sizeLen := len(size) lib.AtgNewZerosOut(ptr, out.ctensor, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("NewZerosOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NewZerosOut") return retVal, err } @@ -28787,9 +30446,10 @@ func(ts *Tensor) Nextafter(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgNextafter(ptr, ts.ctensor, 
other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Nextafter() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Nextafter") return retVal, err } @@ -28802,6 +30462,7 @@ func(ts *Tensor) Nextafter_(other *Tensor)(err error) { lib.AtgNextafter_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Nextafter_() failed: %w", err) return err } ts.ctensor = *ptr @@ -28818,9 +30479,10 @@ func(ts *Tensor) NextafterOut(out *Tensor, other *Tensor, del bool)(retVal *Tens lib.AtgNextafterOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NextafterOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NextafterOut") return retVal, err } @@ -28834,9 +30496,10 @@ func(ts *Tensor) NllLoss(target *Tensor, weight *Tensor, reduction int64, ignore lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLoss") return retVal, err } @@ -28850,9 +30513,10 @@ func(ts *Tensor) NllLoss2d(target *Tensor, weight *Tensor, reduction int64, igno lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLoss2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLoss2d") return retVal, err } @@ -28866,9 +30530,10 @@ func(ts *Tensor) NllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *T lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLoss2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLoss2dBackward") return retVal, err } @@ -28882,9 +30547,10 @@ func(ts *Tensor) NllLoss2dBackwardGradInput(gradInput *Tensor, gradOutput *Tenso lib.AtgNllLoss2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLoss2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLoss2dBackwardGradInput") return retVal, err } @@ -28898,9 +30564,10 @@ func(ts *Tensor) NllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduc lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLoss2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLoss2dOut") return retVal, err } @@ -28914,9 +30581,10 @@ func(ts *Tensor) NllLossBackward(gradOutput *Tensor, target *Tensor, weight *Ten lib.AtgNllLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLossBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLossBackward") return retVal, err } @@ -28930,9 +30598,10 @@ func(ts *Tensor) NllLossBackwardGradInput(gradInput *Tensor, 
gradOutput *Tensor, lib.AtgNllLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLossBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLossBackwardGradInput") return retVal, err } @@ -28946,9 +30615,10 @@ func(ts *Tensor) NllLossNd(target *Tensor, weight *Tensor, reduction int64, igno lib.AtgNllLossNd(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLossNd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLossNd") return retVal, err } @@ -28962,9 +30632,10 @@ func(ts *Tensor) NllLossOut(out *Tensor, target *Tensor, weight *Tensor, reducti lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) if err = TorchErr(); err != nil { + err = fmt.Errorf("NllLossOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NllLossOut") return retVal, err } @@ -28978,9 +30649,10 @@ func(ts *Tensor) Nonzero(del bool)(retVal *Tensor, err error) { lib.AtgNonzero(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Nonzero() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Nonzero") return retVal, err } @@ -28994,9 +30666,10 @@ func(ts *Tensor) NonzeroOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NonzeroOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NonzeroOut") return retVal, err } @@ -29010,9 +30683,10 @@ func(ts *Tensor) Norm(del bool)(retVal *Tensor, err error) { lib.AtgNorm(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Norm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Norm") return retVal, err } @@ -29029,9 +30703,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNormDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormDtypeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormDtypeOut") return retVal, err } @@ -29044,9 +30719,10 @@ func NormExceptDim(v *Tensor, pow int64, dim int64)(retVal *Tensor, err error) { lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormExceptDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormExceptDim") return retVal, err } @@ -29063,9 +30739,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormOut") return retVal, err } @@ -29079,9 +30756,10 @@ func(ts *Tensor) NormScalarOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgNormScalarOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormScalarOut() failed: %w", err) return 
retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormScalarOut") return retVal, err } @@ -29098,9 +30776,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNormScalaroptDim(ptr, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormScalaroptDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormScalaroptDim") return retVal, err } @@ -29117,9 +30796,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNormScalaroptDimDtype(ptr, ts.ctensor, p.cscalar, dim, dimLen, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormScalaroptDimDtype() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormScalaroptDimDtype") return retVal, err } @@ -29133,9 +30813,10 @@ func(ts *Tensor) NormScalaroptDtype(p *Scalar, dtype gotch.DType, del bool)(retV lib.AtgNormScalaroptDtype(ptr, ts.ctensor, p.cscalar, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormScalaroptDtype() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormScalaroptDtype") return retVal, err } @@ -29149,9 +30830,10 @@ func(ts *Tensor) NormScalaroptDtypeOut(out *Tensor, p *Scalar, dtype gotch.DType lib.AtgNormScalaroptDtypeOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormScalaroptDtypeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormScalaroptDtypeOut") return retVal, err } @@ -29164,6 +30846,7 @@ func(ts *Tensor) Normal_(mean float64, std float64)(err error) { lib.AtgNormal_(ptr, ts.ctensor, mean, std) if err = TorchErr(); err != nil { + err = fmt.Errorf("Normal_() failed: %w", err) return err } ts.ctensor = *ptr @@ -29180,9 +30863,10 @@ func(ts *Tensor) NormalFunctional(mean float64, std float64, del bool)(retVal *T lib.AtgNormalFunctional(ptr, ts.ctensor, mean, std) if err = TorchErr(); err != nil { + err = fmt.Errorf("NormalFunctional() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NormalFunctional") return retVal, err } @@ -29196,9 +30880,10 @@ func(ts *Tensor) NotEqual(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgNotEqual(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("NotEqual() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NotEqual") return retVal, err } @@ -29211,6 +30896,7 @@ func(ts *Tensor) NotEqual_(other *Scalar)(err error) { lib.AtgNotEqual_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("NotEqual_() failed: %w", err) return err } ts.ctensor = *ptr @@ -29227,9 +30913,10 @@ func(ts *Tensor) NotEqualScalarOut(out *Tensor, other *Scalar, del bool)(retVal lib.AtgNotEqualScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("NotEqualScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NotEqualScalarOut") return retVal, err } @@ -29243,9 +30930,10 @@ func(ts *Tensor) NotEqualTensor(other *Tensor, del bool)(retVal *Tensor, err err lib.AtgNotEqualTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NotEqualTensor() failed: %w", err) return retVal, 
err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NotEqualTensor") return retVal, err } @@ -29258,6 +30946,7 @@ func(ts *Tensor) NotEqualTensor_(other *Tensor)(err error) { lib.AtgNotEqualTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NotEqualTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -29274,9 +30963,10 @@ func(ts *Tensor) NotEqualTensorOut(out *Tensor, other *Tensor, del bool)(retVal lib.AtgNotEqualTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NotEqualTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NotEqualTensorOut") return retVal, err } @@ -29292,9 +30982,10 @@ func(ts *Tensor) NuclearNorm(keepdim bool, del bool)(retVal *Tensor, err error) if keepdim { ckeepdim = int32(1) } lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NuclearNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NuclearNorm") return retVal, err } @@ -29311,9 +31002,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNuclearNormDim(ptr, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NuclearNormDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NuclearNormDim") return retVal, err } @@ -29330,9 +31022,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgNuclearNormDimOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NuclearNormDimOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NuclearNormDimOut") return retVal, err } @@ -29348,9 +31041,10 @@ func(ts *Tensor) NuclearNormOut(out *Tensor, keepdim bool, del bool)(retVal *Ten if keepdim { ckeepdim = int32(1) } lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("NuclearNormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NuclearNormOut") return retVal, err } @@ -29364,9 +31058,10 @@ func(ts *Tensor) NumpyT(del bool)(retVal *Tensor, err error) { lib.AtgNumpyT(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("NumpyT() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "NumpyT") return retVal, err } @@ -29380,9 +31075,10 @@ func(ts *Tensor) OneHot(numClasses int64, del bool)(retVal *Tensor, err error) { lib.AtgOneHot(ptr, ts.ctensor, numClasses) if err = TorchErr(); err != nil { + err = fmt.Errorf("OneHot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "OneHot") return retVal, err } @@ -29396,9 +31092,10 @@ func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(ret sizeLen := len(size) lib.AtgOnes(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ones() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Ones") return retVal, err } @@ -29412,9 +31109,10 @@ func(ts *Tensor) OnesLike(del bool)(retVal *Tensor, err error) { lib.AtgOnesLike(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("OnesLike() failed: %w", err) return 
retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "OnesLike") return retVal, err } @@ -29428,9 +31126,10 @@ func(ts *Tensor) OnesLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgOnesLikeOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("OnesLikeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "OnesLikeOut") return retVal, err } @@ -29444,9 +31143,10 @@ func OnesOut(out *Tensor, size []int64)(retVal *Tensor, err error) { sizeLen := len(size) lib.AtgOnesOut(ptr, out.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("OnesOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "OnesOut") return retVal, err } @@ -29460,9 +31160,10 @@ func(ts *Tensor) Orgqr(input2 *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Orgqr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Orgqr") return retVal, err } @@ -29476,9 +31177,10 @@ func(ts *Tensor) OrgqrOut(out *Tensor, input2 *Tensor, del bool)(retVal *Tensor, lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("OrgqrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "OrgqrOut") return retVal, err } @@ -29496,9 +31198,10 @@ ctranspose := int32(0) if transpose { ctranspose = int32(1) } lib.AtgOrmqr(ptr, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ormqr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Ormqr") return retVal, err } @@ -29516,9 +31219,10 @@ ctranspose := int32(0) if transpose { ctranspose = int32(1) } lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose) if err = TorchErr(); err != nil { + err = fmt.Errorf("OrmqrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "OrmqrOut") return retVal, err } @@ -29532,9 +31236,10 @@ func(ts *Tensor) Outer(vec2 *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgOuter(ptr, ts.ctensor, vec2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Outer() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Outer") return retVal, err } @@ -29548,9 +31253,10 @@ func(ts *Tensor) OuterOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor, e lib.AtgOuterOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("OuterOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "OuterOut") return retVal, err } @@ -29562,6 +31268,7 @@ func(ts *Tensor) OutputNr(del bool)(retVal int64, err error) { retVal = lib.AtgOutputNr(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("OutputNr() failed: %w", err) return retVal, err } return retVal, err @@ -29583,9 +31290,10 @@ var cvalueVal float64 = 0.0 } lib.AtgPad(ptr, ts.ctensor, pad, padLen, mode, cvalueVal, cvalueNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("Pad() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Pad") return retVal, err } @@ -29602,9 
+31310,10 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.AtgPadSequence(ptr, csequences, len(csequences), cbatchFirst, paddingValue) if err = TorchErr(); err != nil { + err = fmt.Errorf("PadSequence() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PadSequence") return retVal, err } @@ -29619,9 +31328,10 @@ func PairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bo if keepdim { ckeepdim = int32(1) } lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("PairwiseDistance() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PairwiseDistance") return retVal, err } @@ -29635,9 +31345,10 @@ func(ts *Tensor) Pdist(p float64, del bool)(retVal *Tensor, err error) { lib.AtgPdist(ptr, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("Pdist() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Pdist") return retVal, err } @@ -29652,9 +31363,10 @@ func(ts *Tensor) Permute(dims []int64, del bool)(retVal *Tensor, err error) { dimsLen := len(dims) lib.AtgPermute(ptr, ts.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Permute() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Permute") return retVal, err } @@ -29669,9 +31381,10 @@ func(ts *Tensor) PermuteCopy(dims []int64, del bool)(retVal *Tensor, err error) dimsLen := len(dims) lib.AtgPermuteCopy(ptr, ts.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("PermuteCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PermuteCopy") return retVal, err } @@ -29686,9 +31399,10 @@ func(ts *Tensor) PermuteCopyOut(out *Tensor, dims []int64, del bool)(retVal *Ten dimsLen := len(dims) lib.AtgPermuteCopyOut(ptr, out.ctensor, ts.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("PermuteCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PermuteCopyOut") return retVal, err } @@ -29702,9 +31416,10 @@ func(ts *Tensor) PinMemory(device gotch.Device, del bool)(retVal *Tensor, err er lib.AtgPinMemory(ptr, ts.ctensor, device.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("PinMemory() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PinMemory") return retVal, err } @@ -29718,9 +31433,10 @@ func(ts *Tensor) Pinverse(rcond float64, del bool)(retVal *Tensor, err error) { lib.AtgPinverse(ptr, ts.ctensor, rcond) if err = TorchErr(); err != nil { + err = fmt.Errorf("Pinverse() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Pinverse") return retVal, err } @@ -29734,9 +31450,10 @@ func(ts *Tensor) PixelShuffle(upscaleFactor int64, del bool)(retVal *Tensor, err lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PixelShuffle() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PixelShuffle") return retVal, err } @@ -29750,9 +31467,10 @@ func(ts *Tensor) PixelShuffleOut(out *Tensor, upscaleFactor int64, del bool)(ret lib.AtgPixelShuffleOut(ptr, out.ctensor, ts.ctensor, upscaleFactor) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("PixelShuffleOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PixelShuffleOut") return retVal, err } @@ -29766,9 +31484,10 @@ func(ts *Tensor) PixelUnshuffle(downscaleFactor int64, del bool)(retVal *Tensor, lib.AtgPixelUnshuffle(ptr, ts.ctensor, downscaleFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PixelUnshuffle() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PixelUnshuffle") return retVal, err } @@ -29782,9 +31501,10 @@ func(ts *Tensor) PixelUnshuffleOut(out *Tensor, downscaleFactor int64, del bool) lib.AtgPixelUnshuffleOut(ptr, out.ctensor, ts.ctensor, downscaleFactor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PixelUnshuffleOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PixelUnshuffleOut") return retVal, err } @@ -29798,9 +31518,10 @@ func(ts *Tensor) Poisson(del bool)(retVal *Tensor, err error) { lib.AtgPoisson(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Poisson() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Poisson") return retVal, err } @@ -29817,9 +31538,10 @@ cfull := int32(0) if full { cfull = int32(1) } lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("PoissonNllLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PoissonNllLoss") return retVal, err } @@ -29833,9 +31555,10 @@ func(ts *Tensor) PoissonOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgPoissonOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PoissonOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PoissonOut") return retVal, err } @@ -29848,9 +31571,10 @@ func Polar(abs *Tensor, angle *Tensor)(retVal *Tensor, err error) { lib.AtgPolar(ptr, abs.ctensor, angle.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Polar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Polar") return retVal, err } @@ -29863,9 +31587,10 @@ func PolarOut(out *Tensor, abs *Tensor, angle *Tensor)(retVal *Tensor, err error lib.AtgPolarOut(ptr, out.ctensor, abs.ctensor, angle.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PolarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PolarOut") return retVal, err } @@ -29879,9 +31604,10 @@ func(ts *Tensor) Polygamma(n int64, del bool)(retVal *Tensor, err error) { lib.AtgPolygamma(ptr, n, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Polygamma() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Polygamma") return retVal, err } @@ -29894,6 +31620,7 @@ func(ts *Tensor) Polygamma_(n int64)(err error) { lib.AtgPolygamma_(ptr, ts.ctensor, n) if err = TorchErr(); err != nil { + err = fmt.Errorf("Polygamma_() failed: %w", err) return err } ts.ctensor = *ptr @@ -29910,9 +31637,10 @@ func(ts *Tensor) PolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor, er lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PolygammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal 
= newTensor(*ptr, "PolygammaOut") return retVal, err } @@ -29926,9 +31654,10 @@ func(ts *Tensor) Positive(del bool)(retVal *Tensor, err error) { lib.AtgPositive(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Positive() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Positive") return retVal, err } @@ -29942,9 +31671,10 @@ func(ts *Tensor) Pow(exponent *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgPow(ptr, ts.ctensor, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Pow() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Pow") return retVal, err } @@ -29957,6 +31687,7 @@ func(ts *Tensor) Pow_(exponent *Scalar)(err error) { lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Pow_() failed: %w", err) return err } ts.ctensor = *ptr @@ -29972,9 +31703,10 @@ func PowScalar(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor, err error) lib.AtgPowScalar(ptr, selfScalar.cscalar, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PowScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PowScalar") return retVal, err } @@ -29987,9 +31719,10 @@ func PowScalarOut(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Ten lib.AtgPowScalarOut(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PowScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PowScalarOut") return retVal, err } @@ -30002,6 +31735,7 @@ func(ts *Tensor) PowTensor_(exponent *Tensor)(err error) { lib.AtgPowTensor_(ptr, ts.ctensor, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PowTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -30018,9 +31752,10 @@ func(ts *Tensor) PowTensorScalar(exponent *Scalar, del bool)(retVal *Tensor, err lib.AtgPowTensorScalar(ptr, ts.ctensor, exponent.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("PowTensorScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PowTensorScalar") return retVal, err } @@ -30034,9 +31769,10 @@ func(ts *Tensor) PowTensorScalarOut(out *Tensor, exponent *Scalar, del bool)(ret lib.AtgPowTensorScalarOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("PowTensorScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PowTensorScalarOut") return retVal, err } @@ -30050,9 +31786,10 @@ func(ts *Tensor) PowTensorTensorOut(out *Tensor, exponent *Tensor, del bool)(ret lib.AtgPowTensorTensorOut(ptr, out.ctensor, ts.ctensor, exponent.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("PowTensorTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PowTensorTensorOut") return retVal, err } @@ -30066,9 +31803,10 @@ func(ts *Tensor) Prelu(weight *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Prelu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Prelu") return retVal, err } @@ -30082,9 +31820,10 @@ func(ts *Tensor) Prod(dtype gotch.DType, del bool)(retVal 
*Tensor, err error) { lib.AtgProd(ptr, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Prod() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Prod") return retVal, err } @@ -30100,9 +31839,10 @@ func(ts *Tensor) ProdDimInt(dim int64, keepdim bool, dtype gotch.DType, del bool if keepdim { ckeepdim = int32(1) } lib.AtgProdDimInt(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ProdDimInt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ProdDimInt") return retVal, err } @@ -30118,9 +31858,10 @@ func(ts *Tensor) ProdIntOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DT if keepdim { ckeepdim = int32(1) } lib.AtgProdIntOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ProdIntOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ProdIntOut") return retVal, err } @@ -30134,9 +31875,10 @@ func(ts *Tensor) ProdOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tenso lib.AtgProdOut(ptr, out.ctensor, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ProdOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ProdOut") return retVal, err } @@ -30152,9 +31894,10 @@ func(ts *Tensor) Put(index *Tensor, source *Tensor, accumulate bool, del bool)(r if accumulate { caccumulate = int32(1) } lib.AtgPut(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate) if err = TorchErr(); err != nil { + err = fmt.Errorf("Put() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Put") return retVal, err } @@ -30169,6 +31912,7 @@ func(ts *Tensor) Put_(index *Tensor, source *Tensor, accumulate bool)(err error) if accumulate { caccumulate = int32(1) } lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate) if err = TorchErr(); err != nil { + err = fmt.Errorf("Put_() failed: %w", err) return err } ts.ctensor = *ptr @@ -30187,9 +31931,10 @@ func(ts *Tensor) PutOut(out *Tensor, index *Tensor, source *Tensor, accumulate b if accumulate { caccumulate = int32(1) } lib.AtgPutOut(ptr, out.ctensor, ts.ctensor, index.ctensor, source.ctensor, caccumulate) if err = TorchErr(); err != nil { + err = fmt.Errorf("PutOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "PutOut") return retVal, err } @@ -30201,6 +31946,7 @@ func(ts *Tensor) QPerChannelAxis(del bool)(retVal int64, err error) { retVal = lib.AtgQPerChannelAxis(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("QPerChannelAxis() failed: %w", err) return retVal, err } return retVal, err @@ -30215,9 +31961,10 @@ func(ts *Tensor) QPerChannelScales(del bool)(retVal *Tensor, err error) { lib.AtgQPerChannelScales(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("QPerChannelScales() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QPerChannelScales") return retVal, err } @@ -30231,9 +31978,10 @@ func(ts *Tensor) QPerChannelScalesOut(out *Tensor, del bool)(retVal *Tensor, err lib.AtgQPerChannelScalesOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("QPerChannelScalesOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + 
retVal = newTensor(*ptr, "QPerChannelScalesOut") return retVal, err } @@ -30247,9 +31995,10 @@ func(ts *Tensor) QPerChannelZeroPoints(del bool)(retVal *Tensor, err error) { lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("QPerChannelZeroPoints() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QPerChannelZeroPoints") return retVal, err } @@ -30263,9 +32012,10 @@ func(ts *Tensor) QPerChannelZeroPointsOut(out *Tensor, del bool)(retVal *Tensor, lib.AtgQPerChannelZeroPointsOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("QPerChannelZeroPointsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QPerChannelZeroPointsOut") return retVal, err } @@ -30277,6 +32027,7 @@ if del { defer ts.MustDrop() } retVal = lib.AtgQScale(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("QScale() failed: %w", err) return retVal, err } return retVal, err @@ -30289,6 +32040,7 @@ func(ts *Tensor) QZeroPoint(del bool)(retVal int64, err error) { retVal = lib.AtgQZeroPoint(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("QZeroPoint() failed: %w", err) return retVal, err } return retVal, err @@ -30305,10 +32057,11 @@ func(ts *Tensor) Qr(some bool, del bool)(retVal0 *Tensor, retVal1 *Tensor, err e if some { csome = int32(1) } lib.AtgQr(ctensorPtr0, ts.ctensor, csome) if err = TorchErr(); err != nil { + err = fmt.Errorf("Qr() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Qr_0") + retVal1 = newTensor(*ctensorPtr1, "Qr_1") return retVal0, retVal1, err } @@ -30324,10 +32077,11 @@ func(ts *Tensor) QrQ(q *Tensor, r *Tensor, some bool, del bool)(retVal0 *Tensor, if some { csome = int32(1) } lib.AtgQrQ(ctensorPtr0, q.ctensor, r.ctensor, ts.ctensor, csome) if err = TorchErr(); err != nil { + err = fmt.Errorf("QrQ() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "QrQ_0") + retVal1 = newTensor(*ctensorPtr1, "QrQ_1") return retVal0, retVal1, err } @@ -30349,9 +32103,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgQuantile(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("Quantile() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Quantile") return retVal, err } @@ -30373,9 +32128,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgQuantileOut(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantileOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantileOut") return retVal, err } @@ -30397,9 +32153,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgQuantileScalar(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantileScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantileScalar") return retVal, err } @@ -30421,9 +32178,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } 
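newTensor itself is defined outside this hunk range, so the following is only a sketch of the pattern its call sites imply: each ctensor is paired with the name of the generated function that produced it (multi-output calls such as Qr append an output index, hence "Qr_0"/"Qr_1"), and the wrapper is presumably registered for garbage collection, e.g. via runtime.SetFinalizer. A self-contained toy of that shape; tensorHandle, freeHandle, and newTensorSketch are all hypothetical stand-ins, not gotch API:

package main

import (
	"fmt"
	"runtime"
	"time"
)

type tensorHandle uintptr // stand-in for lib.Ctensor

type tensor struct {
	ctensor tensorHandle
	name    string // creating function, e.g. "Qr_0"
}

func freeHandle(h tensorHandle) { fmt.Println("freed handle", h) }

// newTensorSketch mirrors the call shape newTensor(*ptr, "FuncName").
func newTensorSketch(c tensorHandle, name string) *tensor {
	t := &tensor{ctensor: c, name: name}
	// Backstop cleanup: when the Go value becomes unreachable, release the
	// underlying storage. The recorded name lets leak reports say who made it.
	runtime.SetFinalizer(t, func(t *tensor) { freeHandle(t.ctensor) })
	return t
}

func main() {
	newTensorSketch(42, "Qr_0")
	runtime.GC()
	time.Sleep(10 * time.Millisecond) // give the finalizer a chance to run
}
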
lib.AtgQuantileScalarOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim, interpolation) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantileScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantileScalarOut") return retVal, err } @@ -30437,9 +32195,10 @@ func(ts *Tensor) QuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizePerChannel() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizePerChannel") return retVal, err } @@ -30453,9 +32212,10 @@ func(ts *Tensor) QuantizePerChannelOut(out *Tensor, scales *Tensor, zeroPoints * lib.AtgQuantizePerChannelOut(ptr, out.ctensor, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizePerChannelOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizePerChannelOut") return retVal, err } @@ -30469,9 +32229,10 @@ func(ts *Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.D lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizePerTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizePerTensor") return retVal, err } @@ -30487,9 +32248,10 @@ func(ts *Tensor) QuantizePerTensorDynamic(dtype gotch.DType, reduceRange bool, d if reduceRange { creduceRange = int32(1) } lib.AtgQuantizePerTensorDynamic(ptr, ts.ctensor, dtype.CInt(), creduceRange) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizePerTensorDynamic() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizePerTensorDynamic") return retVal, err } @@ -30505,9 +32267,10 @@ func(ts *Tensor) QuantizePerTensorDynamicOut(out *Tensor, dtype gotch.DType, red if reduceRange { creduceRange = int32(1) } lib.AtgQuantizePerTensorDynamicOut(ptr, out.ctensor, ts.ctensor, dtype.CInt(), creduceRange) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizePerTensorDynamicOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizePerTensorDynamicOut") return retVal, err } @@ -30521,9 +32284,10 @@ func(ts *Tensor) QuantizePerTensorTensorQparams(scale *Tensor, zeroPoint *Tensor lib.AtgQuantizePerTensorTensorQparams(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizePerTensorTensorQparams() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizePerTensorTensorQparams") return retVal, err } @@ -30536,9 +32300,10 @@ func QuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tenso lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedBatchNorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedBatchNorm") return retVal, err } @@ -30551,9 +32316,10 @@ func QuantizedBatchNormOut(out *Tensor, input *Tensor, weight *Tensor, bias *Ten 
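The del flag threaded through these signatures drives the `if del { defer ts.MustDrop() }` guard visible in the context lines above: passing true drops the receiver once the libtorch call has consumed it, which keeps chained expressions from leaking intermediates even before the newTensor finalizers kick in. A usage sketch built from methods in this section (x is assumed valid):

package example

import (
	"fmt"

	"github.com/sugarme/gotch/ts"
)

func chain(x *ts.Tensor) (*ts.Tensor, error) {
	neg, err := x.Neg(false) // del=false: the caller still owns x
	if err != nil {
		return nil, fmt.Errorf("chain: %w", err)
	}
	// del=true: neg is dropped as soon as Rad2deg has read it
	return neg.Rad2deg(true)
}
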
lib.AtgQuantizedBatchNormOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedBatchNormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedBatchNormOut") return retVal, err } @@ -30566,9 +32332,10 @@ func QuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh * lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedGruCell() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedGruCell") return retVal, err } @@ -30583,10 +32350,11 @@ func QuantizedLstmCell(input *Tensor, hx []*Tensor, wIh *Tensor, wHh *Tensor, bI for _, t := range hx {chx = append(chx, t.ctensor)} lib.AtgQuantizedLstmCell(ctensorPtr0, input.ctensor, chx, len(chx), wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedLstmCell() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "QuantizedLstmCell_0") + retVal1 = newTensor(*ctensorPtr1, "QuantizedLstmCell_1") return retVal0, retVal1, err } @@ -30606,9 +32374,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgQuantizedMaxPool1d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedMaxPool1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedMaxPool1d") return retVal, err } @@ -30628,9 +32397,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgQuantizedMaxPool1dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedMaxPool1dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedMaxPool1dOut") return retVal, err } @@ -30650,9 +32420,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedMaxPool2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedMaxPool2d") return retVal, err } @@ -30672,9 +32443,10 @@ cceilMode := int32(0) if ceilMode { cceilMode = int32(1) } lib.AtgQuantizedMaxPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, kernelSizeLen, stride, strideLen, padding, paddingLen, dilation, dilationLen, cceilMode) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedMaxPool2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"QuantizedMaxPool2dOut") return retVal, err } @@ -30687,9 +32459,10 @@ func QuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, b lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedRnnReluCell() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedRnnReluCell") return retVal, err } @@ -30702,9 +32475,10 @@ func QuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, b lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("QuantizedRnnTanhCell() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "QuantizedRnnTanhCell") return retVal, err } @@ -30718,9 +32492,10 @@ func(ts *Tensor) Rad2deg(del bool)(retVal *Tensor, err error) { lib.AtgRad2deg(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rad2deg() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rad2deg") return retVal, err } @@ -30733,6 +32508,7 @@ func(ts *Tensor) Rad2deg_()(err error) { lib.AtgRad2deg_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rad2deg_() failed: %w", err) return err } ts.ctensor = *ptr @@ -30749,9 +32525,10 @@ func(ts *Tensor) Rad2degOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgRad2degOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rad2degOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rad2degOut") return retVal, err } @@ -30765,9 +32542,10 @@ func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(ret sizeLen := len(size) lib.AtgRand(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rand() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rand") return retVal, err } @@ -30781,9 +32559,10 @@ func(ts *Tensor) RandLike(del bool)(retVal *Tensor, err error) { lib.AtgRandLike(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandLike() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandLike") return retVal, err } @@ -30797,9 +32576,10 @@ func(ts *Tensor) RandLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgRandLikeOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandLikeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandLikeOut") return retVal, err } @@ -30813,9 +32593,10 @@ func RandOut(out *Tensor, size []int64)(retVal *Tensor, err error) { sizeLen := len(size) lib.AtgRandOut(ptr, out.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandOut") return retVal, err } @@ 
-30829,9 +32610,10 @@ func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice go sizeLen := len(size) lib.AtgRandint(ptr, high, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Randint() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Randint") return retVal, err } @@ -30845,9 +32627,10 @@ func(ts *Tensor) RandintLike(high int64, del bool)(retVal *Tensor, err error) { lib.AtgRandintLike(ptr, ts.ctensor, high) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandintLike() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandintLike") return retVal, err } @@ -30861,9 +32644,10 @@ func(ts *Tensor) RandintLikeLowDtype(low int64, high int64, del bool)(retVal *Te lib.AtgRandintLikeLowDtype(ptr, ts.ctensor, low, high) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandintLikeLowDtype() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandintLikeLowDtype") return retVal, err } @@ -30877,9 +32661,10 @@ func(ts *Tensor) RandintLikeLowDtypeOut(out *Tensor, low int64, high int64, del lib.AtgRandintLikeLowDtypeOut(ptr, out.ctensor, ts.ctensor, low, high) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandintLikeLowDtypeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandintLikeLowDtypeOut") return retVal, err } @@ -30893,9 +32678,10 @@ func(ts *Tensor) RandintLikeOut(out *Tensor, high int64, del bool)(retVal *Tenso lib.AtgRandintLikeOut(ptr, out.ctensor, ts.ctensor, high) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandintLikeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandintLikeOut") return retVal, err } @@ -30909,9 +32695,10 @@ func RandintLow(low int64, high int64, size []int64, optionsKind gotch.DType, op sizeLen := len(size) lib.AtgRandintLow(ptr, low, high, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandintLow() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandintLow") return retVal, err } @@ -30925,9 +32712,10 @@ func RandintLowOut(out *Tensor, low int64, high int64, size []int64)(retVal *Ten sizeLen := len(size) lib.AtgRandintLowOut(ptr, out.ctensor, low, high, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandintLowOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandintLowOut") return retVal, err } @@ -30941,9 +32729,10 @@ func RandintOut(out *Tensor, high int64, size []int64)(retVal *Tensor, err error sizeLen := len(size) lib.AtgRandintOut(ptr, out.ctensor, high, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandintOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandintOut") return retVal, err } @@ -30957,9 +32746,10 @@ func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(re sizeLen := len(size) lib.AtgRandn(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Randn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Randn") return retVal, err } @@ -30973,9 +32763,10 @@ func(ts *Tensor) 
RandnLike(del bool)(retVal *Tensor, err error) { lib.AtgRandnLike(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandnLike() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandnLike") return retVal, err } @@ -30989,9 +32780,10 @@ func(ts *Tensor) RandnLikeOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgRandnLikeOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandnLikeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandnLikeOut") return retVal, err } @@ -31005,9 +32797,10 @@ func RandnOut(out *Tensor, size []int64)(retVal *Tensor, err error) { sizeLen := len(size) lib.AtgRandnOut(ptr, out.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandnOut") return retVal, err } @@ -31021,9 +32814,10 @@ func(ts *Tensor) Random(del bool)(retVal *Tensor, err error) { lib.AtgRandom(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Random() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Random") return retVal, err } @@ -31036,6 +32830,7 @@ func(ts *Tensor) Random_()(err error) { lib.AtgRandom_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Random_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31058,9 +32853,10 @@ func(ts *Tensor) RandomFrom(from int64, to []int64, del bool)(retVal *Tensor, er } lib.AtgRandomFrom(ptr, ts.ctensor, from, ctoVal, ctoNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandomFrom() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandomFrom") return retVal, err } @@ -31079,6 +32875,7 @@ func(ts *Tensor) RandomFrom_(from int64, to []int64)(err error) { } lib.AtgRandomFrom_(ptr, ts.ctensor, from, ctoVal, ctoNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandomFrom_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31101,9 +32898,10 @@ func(ts *Tensor) RandomFromOut(out *Tensor, from int64, to []int64, del bool)(re } lib.AtgRandomFromOut(ptr, out.ctensor, ts.ctensor, from, ctoVal, ctoNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandomFromOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandomFromOut") return retVal, err } @@ -31117,9 +32915,10 @@ func(ts *Tensor) RandomOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgRandomOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandomOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandomOut") return retVal, err } @@ -31133,9 +32932,10 @@ func(ts *Tensor) RandomTo(to int64, del bool)(retVal *Tensor, err error) { lib.AtgRandomTo(ptr, ts.ctensor, to) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandomTo() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandomTo") return retVal, err } @@ -31148,6 +32948,7 @@ func(ts *Tensor) RandomTo_(to int64)(err error) { lib.AtgRandomTo_(ptr, ts.ctensor, to) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandomTo_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31164,9 +32965,10 @@ func(ts *Tensor) RandomToOut(out *Tensor, to int64, del 
bool)(retVal *Tensor, er lib.AtgRandomToOut(ptr, out.ctensor, ts.ctensor, to) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandomToOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandomToOut") return retVal, err } @@ -31179,9 +32981,10 @@ func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retV lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Randperm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Randperm") return retVal, err } @@ -31194,9 +32997,10 @@ func RandpermOut(out *Tensor, n int64)(retVal *Tensor, err error) { lib.AtgRandpermOut(ptr, out.ctensor, n) if err = TorchErr(); err != nil { + err = fmt.Errorf("RandpermOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RandpermOut") return retVal, err } @@ -31209,9 +33013,10 @@ func Range(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice go lib.AtgRange(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Range() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Range") return retVal, err } @@ -31224,9 +33029,10 @@ func RangeOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor, err error lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("RangeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RangeOut") return retVal, err } @@ -31239,9 +33045,10 @@ func RangeOut_(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor, err erro lib.AtgRangeOut_(ptr, out.ctensor, start.cscalar, end.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("RangeOut_() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RangeOut_") return retVal, err } @@ -31254,9 +33061,10 @@ func RangeStep(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevic lib.AtgRangeStep(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("RangeStep() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RangeStep") return retVal, err } @@ -31270,9 +33078,10 @@ func(ts *Tensor) Ravel(del bool)(retVal *Tensor, err error) { lib.AtgRavel(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Ravel() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Ravel") return retVal, err } @@ -31286,9 +33095,10 @@ func(ts *Tensor) Real(del bool)(retVal *Tensor, err error) { lib.AtgReal(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Real() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Real") return retVal, err } @@ -31302,9 +33112,10 @@ func(ts *Tensor) Reciprocal(del bool)(retVal *Tensor, err error) { lib.AtgReciprocal(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Reciprocal() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Reciprocal") return retVal, err } @@ -31317,6 +33128,7 @@ func(ts *Tensor) Reciprocal_()(err error) { lib.AtgReciprocal_(ptr, ts.ctensor) 
if err = TorchErr(); err != nil { + err = fmt.Errorf("Reciprocal_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31333,9 +33145,10 @@ func(ts *Tensor) ReciprocalOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReciprocalOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReciprocalOut") return retVal, err } @@ -31350,9 +33163,10 @@ func(ts *Tensor) ReflectionPad1d(padding []int64, del bool)(retVal *Tensor, err paddingLen := len(padding) lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad1d") return retVal, err } @@ -31367,9 +33181,10 @@ func(ts *Tensor) ReflectionPad1dBackward(gradOutput *Tensor, padding []int64, de paddingLen := len(padding) lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad1dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad1dBackward") return retVal, err } @@ -31384,9 +33199,10 @@ func(ts *Tensor) ReflectionPad1dBackwardGradInput(gradInput *Tensor, gradOutput paddingLen := len(padding) lib.AtgReflectionPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad1dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad1dBackwardGradInput") return retVal, err } @@ -31401,9 +33217,10 @@ func(ts *Tensor) ReflectionPad1dOut(out *Tensor, padding []int64, del bool)(retV paddingLen := len(padding) lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad1dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad1dOut") return retVal, err } @@ -31418,9 +33235,10 @@ func(ts *Tensor) ReflectionPad2d(padding []int64, del bool)(retVal *Tensor, err paddingLen := len(padding) lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad2d") return retVal, err } @@ -31435,9 +33253,10 @@ func(ts *Tensor) ReflectionPad2dBackward(gradOutput *Tensor, padding []int64, de paddingLen := len(padding) lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad2dBackward") return retVal, err } @@ -31452,9 +33271,10 @@ func(ts *Tensor) ReflectionPad2dBackwardGradInput(gradInput *Tensor, gradOutput paddingLen := len(padding) lib.AtgReflectionPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"ReflectionPad2dBackwardGradInput") return retVal, err } @@ -31469,9 +33289,10 @@ func(ts *Tensor) ReflectionPad2dOut(out *Tensor, padding []int64, del bool)(retV paddingLen := len(padding) lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad2dOut") return retVal, err } @@ -31486,9 +33307,10 @@ func(ts *Tensor) ReflectionPad3d(padding []int64, del bool)(retVal *Tensor, err paddingLen := len(padding) lib.AtgReflectionPad3d(ptr, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad3d") return retVal, err } @@ -31503,9 +33325,10 @@ func(ts *Tensor) ReflectionPad3dBackward(gradOutput *Tensor, padding []int64, de paddingLen := len(padding) lib.AtgReflectionPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad3dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad3dBackward") return retVal, err } @@ -31520,9 +33343,10 @@ func(ts *Tensor) ReflectionPad3dBackwardGradInput(gradInput *Tensor, gradOutput paddingLen := len(padding) lib.AtgReflectionPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad3dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad3dBackwardGradInput") return retVal, err } @@ -31537,9 +33361,10 @@ func(ts *Tensor) ReflectionPad3dOut(out *Tensor, padding []int64, del bool)(retV paddingLen := len(padding) lib.AtgReflectionPad3dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReflectionPad3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReflectionPad3dOut") return retVal, err } @@ -31553,9 +33378,10 @@ func(ts *Tensor) Relu(del bool)(retVal *Tensor, err error) { lib.AtgRelu(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Relu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Relu") return retVal, err } @@ -31569,9 +33395,10 @@ func(ts *Tensor) Relu6(del bool)(retVal *Tensor, err error) { lib.AtgRelu6(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Relu6() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Relu6") return retVal, err } @@ -31584,6 +33411,7 @@ func(ts *Tensor) Relu6_()(err error) { lib.AtgRelu6_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Relu6_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31599,6 +33427,7 @@ func(ts *Tensor) Relu_()(err error) { lib.AtgRelu_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Relu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31615,9 +33444,10 @@ func(ts *Tensor) ReluOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgReluOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReluOut() failed: %w", err) return retVal, err } - retVal = 
&Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReluOut") return retVal, err } @@ -31631,9 +33461,10 @@ func(ts *Tensor) Remainder(other *Scalar, del bool)(retVal *Tensor, err error) { lib.AtgRemainder(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Remainder() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Remainder") return retVal, err } @@ -31646,6 +33477,7 @@ func(ts *Tensor) Remainder_(other *Scalar)(err error) { lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Remainder_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31662,9 +33494,10 @@ func(ts *Tensor) RemainderScalarOut(out *Tensor, other *Scalar, del bool)(retVal lib.AtgRemainderScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("RemainderScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RemainderScalarOut") return retVal, err } @@ -31677,9 +33510,10 @@ func RemainderScalarTensor(selfScalar *Scalar, other *Tensor)(retVal *Tensor, er lib.AtgRemainderScalarTensor(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RemainderScalarTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RemainderScalarTensor") return retVal, err } @@ -31692,9 +33526,10 @@ func RemainderScalarTensorOut(out *Tensor, selfScalar *Scalar, other *Tensor)(re lib.AtgRemainderScalarTensorOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RemainderScalarTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RemainderScalarTensorOut") return retVal, err } @@ -31708,9 +33543,10 @@ func(ts *Tensor) RemainderTensor(other *Tensor, del bool)(retVal *Tensor, err er lib.AtgRemainderTensor(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RemainderTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RemainderTensor") return retVal, err } @@ -31723,6 +33559,7 @@ func(ts *Tensor) RemainderTensor_(other *Tensor)(err error) { lib.AtgRemainderTensor_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RemainderTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -31739,9 +33576,10 @@ func(ts *Tensor) RemainderTensorOut(out *Tensor, other *Tensor, del bool)(retVal lib.AtgRemainderTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RemainderTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RemainderTensorOut") return retVal, err } @@ -31755,9 +33593,10 @@ func(ts *Tensor) Renorm(p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Renorm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Renorm") return retVal, err } @@ -31770,6 +33609,7 @@ func(ts *Tensor) Renorm_(p *Scalar, dim int64, maxnorm *Scalar)(err error) { lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Renorm_() failed: %w", err) return err } 
ts.ctensor = *ptr @@ -31786,9 +33626,10 @@ func(ts *Tensor) RenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, d lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("RenormOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RenormOut") return retVal, err } @@ -31803,9 +33644,10 @@ func(ts *Tensor) Repeat(repeats []int64, del bool)(retVal *Tensor, err error) { repeatsLen := len(repeats) lib.AtgRepeat(ptr, ts.ctensor, repeats, repeatsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Repeat() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Repeat") return retVal, err } @@ -31824,9 +33666,10 @@ func RepeatInterleave(repeats *Tensor, outputSize []int64)(retVal *Tensor, err e } lib.AtgRepeatInterleave(ptr, repeats.ctensor, coutputSizeVal, coutputSizeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("RepeatInterleave() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RepeatInterleave") return retVal, err } @@ -31852,9 +33695,10 @@ var coutputSizeVal int64 = 0 } lib.AtgRepeatInterleaveSelfInt(ptr, ts.ctensor, repeats, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("RepeatInterleaveSelfInt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RepeatInterleaveSelfInt") return retVal, err } @@ -31880,9 +33724,10 @@ var coutputSizeVal int64 = 0 } lib.AtgRepeatInterleaveSelfTensor(ptr, ts.ctensor, repeats.ctensor, cdimVal, cdimNull, coutputSizeVal, coutputSizeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("RepeatInterleaveSelfTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RepeatInterleaveSelfTensor") return retVal, err } @@ -31901,9 +33746,10 @@ func RepeatInterleaveTensorOut(out *Tensor, repeats *Tensor, outputSize []int64) } lib.AtgRepeatInterleaveTensorOut(ptr, out.ctensor, repeats.ctensor, coutputSizeVal, coutputSizeNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("RepeatInterleaveTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RepeatInterleaveTensorOut") return retVal, err } @@ -31918,9 +33764,10 @@ func(ts *Tensor) RepeatOut(out *Tensor, repeats []int64, del bool)(retVal *Tenso repeatsLen := len(repeats) lib.AtgRepeatOut(ptr, out.ctensor, ts.ctensor, repeats, repeatsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("RepeatOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RepeatOut") return retVal, err } @@ -31935,9 +33782,10 @@ func(ts *Tensor) ReplicationPad1d(padding []int64, del bool)(retVal *Tensor, err paddingLen := len(padding) lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad1d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad1d") return retVal, err } @@ -31952,9 +33800,10 @@ func(ts *Tensor) ReplicationPad1dBackward(gradOutput *Tensor, padding []int64, d paddingLen := len(padding) lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad1dBackward() failed: %w", 
err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad1dBackward") return retVal, err } @@ -31969,9 +33818,10 @@ func(ts *Tensor) ReplicationPad1dBackwardGradInput(gradInput *Tensor, gradOutput paddingLen := len(padding) lib.AtgReplicationPad1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad1dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad1dBackwardGradInput") return retVal, err } @@ -31986,9 +33836,10 @@ func(ts *Tensor) ReplicationPad1dOut(out *Tensor, padding []int64, del bool)(ret paddingLen := len(padding) lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad1dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad1dOut") return retVal, err } @@ -32003,9 +33854,10 @@ func(ts *Tensor) ReplicationPad2d(padding []int64, del bool)(retVal *Tensor, err paddingLen := len(padding) lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad2d") return retVal, err } @@ -32020,9 +33872,10 @@ func(ts *Tensor) ReplicationPad2dBackward(gradOutput *Tensor, padding []int64, d paddingLen := len(padding) lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad2dBackward") return retVal, err } @@ -32037,9 +33890,10 @@ func(ts *Tensor) ReplicationPad2dBackwardGradInput(gradInput *Tensor, gradOutput paddingLen := len(padding) lib.AtgReplicationPad2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad2dBackwardGradInput") return retVal, err } @@ -32054,9 +33908,10 @@ func(ts *Tensor) ReplicationPad2dOut(out *Tensor, padding []int64, del bool)(ret paddingLen := len(padding) lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad2dOut") return retVal, err } @@ -32071,9 +33926,10 @@ func(ts *Tensor) ReplicationPad3d(padding []int64, del bool)(retVal *Tensor, err paddingLen := len(padding) lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad3d") return retVal, err } @@ -32088,9 +33944,10 @@ func(ts *Tensor) ReplicationPad3dBackward(gradOutput *Tensor, padding []int64, d paddingLen := len(padding) lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = 
fmt.Errorf("ReplicationPad3dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad3dBackward") return retVal, err } @@ -32105,9 +33962,10 @@ func(ts *Tensor) ReplicationPad3dBackwardGradInput(gradInput *Tensor, gradOutput paddingLen := len(padding) lib.AtgReplicationPad3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad3dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad3dBackwardGradInput") return retVal, err } @@ -32122,9 +33980,10 @@ func(ts *Tensor) ReplicationPad3dOut(out *Tensor, padding []int64, del bool)(ret paddingLen := len(padding) lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReplicationPad3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReplicationPad3dOut") return retVal, err } @@ -32139,6 +33998,7 @@ func(ts *Tensor) RequiresGrad_(requiresGrad bool)(err error) { if requiresGrad { crequiresGrad = int32(1) } lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad) if err = TorchErr(); err != nil { + err = fmt.Errorf("RequiresGrad_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32156,9 +34016,10 @@ func(ts *Tensor) Reshape(shape []int64, del bool)(retVal *Tensor, err error) { shapeLen := len(shape) lib.AtgReshape(ptr, ts.ctensor, shape, shapeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Reshape() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Reshape") return retVal, err } @@ -32172,9 +34033,10 @@ func(ts *Tensor) ReshapeAs(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ReshapeAs() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ReshapeAs") return retVal, err } @@ -32189,9 +34051,10 @@ func(ts *Tensor) Resize(size []int64, del bool)(retVal *Tensor, err error) { sizeLen := len(size) lib.AtgResize(ptr, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Resize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Resize") return retVal, err } @@ -32205,6 +34068,7 @@ func(ts *Tensor) Resize_(size []int64)(err error) { sizeLen := len(size) lib.AtgResize_(ptr, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Resize_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32221,9 +34085,10 @@ func(ts *Tensor) ResizeAs(theTemplate *Tensor, del bool)(retVal *Tensor, err err lib.AtgResizeAs(ptr, ts.ctensor, theTemplate.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResizeAs() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ResizeAs") return retVal, err } @@ -32236,6 +34101,7 @@ func(ts *Tensor) ResizeAs_(theTemplate *Tensor)(err error) { lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResizeAs_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32252,9 +34118,10 @@ func(ts *Tensor) ResizeAsOut(out *Tensor, theTemplate *Tensor, del bool)(retVal lib.AtgResizeAsOut(ptr, out.ctensor, ts.ctensor, 
theTemplate.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResizeAsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ResizeAsOut") return retVal, err } @@ -32268,9 +34135,10 @@ func(ts *Tensor) ResizeAsSparse(theTemplate *Tensor, del bool)(retVal *Tensor, e lib.AtgResizeAsSparse(ptr, ts.ctensor, theTemplate.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResizeAsSparse() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ResizeAsSparse") return retVal, err } @@ -32283,6 +34151,7 @@ func(ts *Tensor) ResizeAsSparse_(theTemplate *Tensor)(err error) { lib.AtgResizeAsSparse_(ptr, ts.ctensor, theTemplate.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResizeAsSparse_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32299,9 +34168,10 @@ func(ts *Tensor) ResizeAsSparseOut(out *Tensor, theTemplate *Tensor, del bool)(r lib.AtgResizeAsSparseOut(ptr, out.ctensor, ts.ctensor, theTemplate.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResizeAsSparseOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ResizeAsSparseOut") return retVal, err } @@ -32316,9 +34186,10 @@ func(ts *Tensor) ResizeOut(out *Tensor, size []int64, del bool)(retVal *Tensor, sizeLen := len(size) lib.AtgResizeOut(ptr, out.ctensor, ts.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResizeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ResizeOut") return retVal, err } @@ -32332,9 +34203,10 @@ func(ts *Tensor) ResolveConj(del bool)(retVal *Tensor, err error) { lib.AtgResolveConj(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResolveConj() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ResolveConj") return retVal, err } @@ -32348,9 +34220,10 @@ func(ts *Tensor) ResolveNeg(del bool)(retVal *Tensor, err error) { lib.AtgResolveNeg(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ResolveNeg() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ResolveNeg") return retVal, err } @@ -32362,6 +34235,7 @@ func(ts *Tensor) RetainsGrad(del bool)(retVal bool, err error) { retVal = lib.AtgRetainsGrad(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RetainsGrad() failed: %w", err) return retVal, err } return retVal, err @@ -32385,10 +34259,11 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.AtgRnnRelu(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("RnnRelu() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "RnnRelu_0") + retVal1 = newTensor(*ctensorPtr1, "RnnRelu_1") return retVal0, retVal1, err } @@ -32401,9 +34276,10 @@ func RnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tenso lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RnnReluCell() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RnnReluCell") return retVal, err 
} @@ -32424,10 +34300,11 @@ cbidirectional := int32(0) if bidirectional { cbidirectional = int32(1) } lib.AtgRnnReluData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) if err = TorchErr(); err != nil { + err = fmt.Errorf("RnnReluData() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "RnnReluData_0") + retVal1 = newTensor(*ctensorPtr1, "RnnReluData_1") return retVal0, retVal1, err } @@ -32450,10 +34327,11 @@ cbatchFirst := int32(0) if batchFirst { cbatchFirst = int32(1) } lib.AtgRnnTanh(ctensorPtr0, input.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) if err = TorchErr(); err != nil { + err = fmt.Errorf("RnnTanh() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "RnnTanh_0") + retVal1 = newTensor(*ctensorPtr1, "RnnTanh_1") return retVal0, retVal1, err } @@ -32466,9 +34344,10 @@ func RnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tenso lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RnnTanhCell() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RnnTanhCell") return retVal, err } @@ -32489,10 +34368,11 @@ cbidirectional := int32(0) if bidirectional { cbidirectional = int32(1) } lib.AtgRnnTanhData(ctensorPtr0, data.ctensor, batchSizes.ctensor, hx.ctensor, cparams, len(cparams), chasBiases, numLayers, dropout, ctrain, cbidirectional) if err = TorchErr(); err != nil { + err = fmt.Errorf("RnnTanhData() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "RnnTanhData_0") + retVal1 = newTensor(*ctensorPtr1, "RnnTanhData_1") return retVal0, retVal1, err } @@ -32508,9 +34388,10 @@ func(ts *Tensor) Roll(shifts []int64, dims []int64, del bool)(retVal *Tensor, er dimsLen := len(dims) lib.AtgRoll(ptr, ts.ctensor, shifts, shiftsLen, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Roll() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Roll") return retVal, err } @@ -32526,9 +34407,10 @@ func(ts *Tensor) RollOut(out *Tensor, shifts []int64, dims []int64, del bool)(re dimsLen := len(dims) lib.AtgRollOut(ptr, out.ctensor, ts.ctensor, shifts, shiftsLen, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("RollOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RollOut") return retVal, err } @@ -32543,9 +34425,10 @@ func(ts *Tensor) Rot90(k int64, dims []int64, del bool)(retVal *Tensor, err erro dimsLen := len(dims) lib.AtgRot90(ptr, ts.ctensor, k, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rot90() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rot90") return retVal, err } @@ -32560,9 +34443,10 @@ func(ts *Tensor) Rot90Out(out *Tensor, k int64, dims []int64, del bool)(retVal * dimsLen := len(dims) lib.AtgRot90Out(ptr, out.ctensor, ts.ctensor, k, dims, dimsLen) if err = TorchErr(); err != nil { + err 
= fmt.Errorf("Rot90Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rot90Out") return retVal, err } @@ -32576,9 +34460,10 @@ func(ts *Tensor) Round(del bool)(retVal *Tensor, err error) { lib.AtgRound(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Round() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Round") return retVal, err } @@ -32591,6 +34476,7 @@ func(ts *Tensor) Round_()(err error) { lib.AtgRound_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Round_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32607,9 +34493,10 @@ func(ts *Tensor) RoundDecimals(decimals int64, del bool)(retVal *Tensor, err err lib.AtgRoundDecimals(ptr, ts.ctensor, decimals) if err = TorchErr(); err != nil { + err = fmt.Errorf("RoundDecimals() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RoundDecimals") return retVal, err } @@ -32622,6 +34509,7 @@ func(ts *Tensor) RoundDecimals_(decimals int64)(err error) { lib.AtgRoundDecimals_(ptr, ts.ctensor, decimals) if err = TorchErr(); err != nil { + err = fmt.Errorf("RoundDecimals_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32638,9 +34526,10 @@ func(ts *Tensor) RoundDecimalsOut(out *Tensor, decimals int64, del bool)(retVal lib.AtgRoundDecimalsOut(ptr, out.ctensor, ts.ctensor, decimals) if err = TorchErr(); err != nil { + err = fmt.Errorf("RoundDecimalsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RoundDecimalsOut") return retVal, err } @@ -32654,9 +34543,10 @@ func(ts *Tensor) RoundOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RoundOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RoundOut") return retVal, err } @@ -32670,9 +34560,10 @@ func(ts *Tensor) RowIndices(del bool)(retVal *Tensor, err error) { lib.AtgRowIndices(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RowIndices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RowIndices") return retVal, err } @@ -32686,9 +34577,10 @@ func(ts *Tensor) RowIndicesCopy(del bool)(retVal *Tensor, err error) { lib.AtgRowIndicesCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RowIndicesCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RowIndicesCopy") return retVal, err } @@ -32702,9 +34594,10 @@ func(ts *Tensor) RowIndicesCopyOut(out *Tensor, del bool)(retVal *Tensor, err er lib.AtgRowIndicesCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RowIndicesCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RowIndicesCopyOut") return retVal, err } @@ -32719,9 +34612,10 @@ func RowStack(tensors []*Tensor)(retVal *Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgRowStack(ptr, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("RowStack() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RowStack") return retVal, err } @@ -32736,9 +34630,10 @@ func RowStackOut(out *Tensor, tensors []*Tensor)(retVal 
*Tensor, err error) { for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} lib.AtgRowStackOut(ptr, out.ctensor, ctensors, len(ctensors)) if err = TorchErr(); err != nil { + err = fmt.Errorf("RowStackOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RowStackOut") return retVal, err } @@ -32754,9 +34649,10 @@ func(ts *Tensor) Rrelu(training bool, del bool)(retVal *Tensor, err error) { if training { ctraining = int32(1) } lib.AtgRrelu(ptr, ts.ctensor, ctraining) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rrelu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rrelu") return retVal, err } @@ -32771,6 +34667,7 @@ func(ts *Tensor) Rrelu_(training bool)(err error) { if training { ctraining = int32(1) } lib.AtgRrelu_(ptr, ts.ctensor, ctraining) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rrelu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32789,9 +34686,10 @@ func(ts *Tensor) RreluWithNoise(noise *Tensor, training bool, del bool)(retVal * if training { ctraining = int32(1) } lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining) if err = TorchErr(); err != nil { + err = fmt.Errorf("RreluWithNoise() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RreluWithNoise") return retVal, err } @@ -32806,6 +34704,7 @@ func(ts *Tensor) RreluWithNoise_(noise *Tensor, training bool)(err error) { if training { ctraining = int32(1) } lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining) if err = TorchErr(); err != nil { + err = fmt.Errorf("RreluWithNoise_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32826,9 +34725,10 @@ cselfIsResult := int32(0) if selfIsResult { cselfIsResult = int32(1) } lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult) if err = TorchErr(); err != nil { + err = fmt.Errorf("RreluWithNoiseBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RreluWithNoiseBackward") return retVal, err } @@ -32846,9 +34746,10 @@ cselfIsResult := int32(0) if selfIsResult { cselfIsResult = int32(1) } lib.AtgRreluWithNoiseBackwardOut(ptr, out.ctensor, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult) if err = TorchErr(); err != nil { + err = fmt.Errorf("RreluWithNoiseBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RreluWithNoiseBackwardOut") return retVal, err } @@ -32864,9 +34765,10 @@ func(ts *Tensor) RreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, de if training { ctraining = int32(1) } lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining) if err = TorchErr(); err != nil { + err = fmt.Errorf("RreluWithNoiseOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RreluWithNoiseOut") return retVal, err } @@ -32880,9 +34782,10 @@ func(ts *Tensor) Rsqrt(del bool)(retVal *Tensor, err error) { lib.AtgRsqrt(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rsqrt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rsqrt") return retVal, err } @@ -32895,6 +34798,7 @@ func(ts *Tensor) Rsqrt_()(err error) { lib.AtgRsqrt_(ptr, ts.ctensor) if err = TorchErr(); err != nil { 
+ err = fmt.Errorf("Rsqrt_() failed: %w", err) return err } ts.ctensor = *ptr @@ -32911,9 +34815,10 @@ func(ts *Tensor) RsqrtOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RsqrtOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RsqrtOut") return retVal, err } @@ -32927,9 +34832,10 @@ func(ts *Tensor) Rsub(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgRsub(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Rsub() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Rsub") return retVal, err } @@ -32943,9 +34849,10 @@ func(ts *Tensor) RsubScalar(other *Scalar, del bool)(retVal *Tensor, err error) lib.AtgRsubScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("RsubScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RsubScalar") return retVal, err } @@ -32959,9 +34866,10 @@ func(ts *Tensor) RsubScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Ten lib.AtgRsubScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("RsubScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RsubScalarOut") return retVal, err } @@ -32975,9 +34883,10 @@ func(ts *Tensor) RsubTensorOut(out *Tensor, other *Tensor, del bool)(retVal *Ten lib.AtgRsubTensorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("RsubTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "RsubTensorOut") return retVal, err } @@ -32990,9 +34899,10 @@ func ScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScalarTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScalarTensor") return retVal, err } @@ -33005,9 +34915,10 @@ func ScalarTensorOut(out *Tensor, s *Scalar)(retVal *Tensor, err error) { lib.AtgScalarTensorOut(ptr, out.ctensor, s.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScalarTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScalarTensorOut") return retVal, err } @@ -33022,9 +34933,10 @@ func ScaledDotProductAttention(query *Tensor, key *Tensor, value *Tensor, attnMa if isCausal { cisCausal = int32(1) } lib.AtgScaledDotProductAttention(ptr, query.ctensor, key.ctensor, value.ctensor, attnMask.ctensor, dropoutP, cisCausal) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScaledDotProductAttention() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScaledDotProductAttention") return retVal, err } @@ -33038,9 +34950,10 @@ func(ts *Tensor) Scatter(dim int64, index *Tensor, src *Tensor, del bool)(retVal lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Scatter() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Scatter") return retVal, err } @@ -33053,6 +34966,7 @@ func(ts *Tensor) Scatter_(dim int64, 
index *Tensor, src *Tensor)(err error) { lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Scatter_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33069,9 +34983,10 @@ func(ts *Tensor) ScatterAdd(dim int64, index *Tensor, src *Tensor, del bool)(ret lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterAdd() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterAdd") return retVal, err } @@ -33084,6 +34999,7 @@ func(ts *Tensor) ScatterAdd_(dim int64, index *Tensor, src *Tensor)(err error) { lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterAdd_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33100,9 +35016,10 @@ func(ts *Tensor) ScatterAddOut(out *Tensor, dim int64, index *Tensor, src *Tenso lib.AtgScatterAddOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterAddOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterAddOut") return retVal, err } @@ -33116,9 +35033,10 @@ func(ts *Tensor) ScatterReduce(dim int64, index *Tensor, src *Tensor, reduce str lib.AtgScatterReduce(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterReduce() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterReduce") return retVal, err } @@ -33131,6 +35049,7 @@ func(ts *Tensor) ScatterReduce_(dim int64, index *Tensor, src *Tensor, reduce st lib.AtgScatterReduce_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterReduce_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33147,9 +35066,10 @@ func(ts *Tensor) ScatterReduceOut(out *Tensor, dim int64, index *Tensor, src *Te lib.AtgScatterReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterReduceOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterReduceOut") return retVal, err } @@ -33163,9 +35083,10 @@ func(ts *Tensor) ScatterSrcOut(out *Tensor, dim int64, index *Tensor, src *Tenso lib.AtgScatterSrcOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, src.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterSrcOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterSrcOut") return retVal, err } @@ -33179,9 +35100,10 @@ func(ts *Tensor) ScatterValue(dim int64, index *Tensor, value *Scalar, del bool) lib.AtgScatterValue(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterValue() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterValue") return retVal, err } @@ -33194,6 +35116,7 @@ func(ts *Tensor) ScatterValue_(dim int64, index *Tensor, value *Scalar)(err erro lib.AtgScatterValue_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterValue_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33210,9 +35133,10 @@ func(ts *Tensor) ScatterValueOut(out *Tensor, dim 
int64, index *Tensor, value *S lib.AtgScatterValueOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterValueOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterValueOut") return retVal, err } @@ -33226,9 +35150,10 @@ func(ts *Tensor) ScatterValueReduce(dim int64, index *Tensor, value *Scalar, red lib.AtgScatterValueReduce(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterValueReduce() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterValueReduce") return retVal, err } @@ -33241,6 +35166,7 @@ func(ts *Tensor) ScatterValueReduce_(dim int64, index *Tensor, value *Scalar, re lib.AtgScatterValueReduce_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterValueReduce_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33257,9 +35183,10 @@ func(ts *Tensor) ScatterValueReduceOut(out *Tensor, dim int64, index *Tensor, va lib.AtgScatterValueReduceOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, value.cscalar, reduce) if err = TorchErr(); err != nil { + err = fmt.Errorf("ScatterValueReduceOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ScatterValueReduceOut") return retVal, err } @@ -33277,9 +35204,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgSearchsorted(ptr, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Searchsorted() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Searchsorted") return retVal, err } @@ -33296,9 +35224,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgSearchsortedScalar(ptr, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright, side, sorter.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SearchsortedScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SearchsortedScalar") return retVal, err } @@ -33315,9 +35244,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgSearchsortedScalarOut(ptr, out.ctensor, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright, side, sorter.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SearchsortedScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SearchsortedScalarOut") return retVal, err } @@ -33335,9 +35265,10 @@ cright := int32(0) if right { cright = int32(1) } lib.AtgSearchsortedTensorOut(ptr, out.ctensor, sortedSequence.ctensor, ts.ctensor, coutInt32, cright, side, sorter.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SearchsortedTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SearchsortedTensorOut") return retVal, err } @@ -33352,9 +35283,10 @@ func SegmentReduce(data *Tensor, reduce string, lengths *Tensor, indices *Tensor if unsafety { cunsafety = int32(1) } lib.AtgSegmentReduce(ptr, data.ctensor, reduce, lengths.ctensor, indices.ctensor, offsets.ctensor, axis, cunsafety, initial.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SegmentReduce() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} 
+ retVal = newTensor(*ptr, "SegmentReduce") return retVal, err } @@ -33369,9 +35301,10 @@ func SegmentReduceOut(out *Tensor, data *Tensor, reduce string, lengths *Tensor, if unsafety { cunsafety = int32(1) } lib.AtgSegmentReduceOut(ptr, out.ctensor, data.ctensor, reduce, lengths.ctensor, indices.ctensor, offsets.ctensor, axis, cunsafety, initial.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SegmentReduceOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SegmentReduceOut") return retVal, err } @@ -33385,9 +35318,10 @@ func(ts *Tensor) Select(dim int64, index int64, del bool)(retVal *Tensor, err er lib.AtgSelect(ptr, ts.ctensor, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("Select() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Select") return retVal, err } @@ -33401,9 +35335,10 @@ func SelectBackward(gradOutput *Tensor, inputSizes []int64, dim int64, index int inputSizesLen := len(inputSizes) lib.AtgSelectBackward(ptr, gradOutput.ctensor, inputSizes, inputSizesLen, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("SelectBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SelectBackward") return retVal, err } @@ -33417,9 +35352,10 @@ func SelectBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, dim inputSizesLen := len(inputSizes) lib.AtgSelectBackwardOut(ptr, out.ctensor, gradOutput.ctensor, inputSizes, inputSizesLen, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("SelectBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SelectBackwardOut") return retVal, err } @@ -33433,9 +35369,10 @@ func(ts *Tensor) SelectCopy(dim int64, index int64, del bool)(retVal *Tensor, er lib.AtgSelectCopy(ptr, ts.ctensor, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("SelectCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SelectCopy") return retVal, err } @@ -33449,9 +35386,10 @@ func(ts *Tensor) SelectCopyIntOut(out *Tensor, dim int64, index int64, del bool) lib.AtgSelectCopyIntOut(ptr, out.ctensor, ts.ctensor, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("SelectCopyIntOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SelectCopyIntOut") return retVal, err } @@ -33465,9 +35403,10 @@ func(ts *Tensor) SelectScatter(src *Tensor, dim int64, index int64, del bool)(re lib.AtgSelectScatter(ptr, ts.ctensor, src.ctensor, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("SelectScatter() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SelectScatter") return retVal, err } @@ -33481,9 +35420,10 @@ func(ts *Tensor) SelectScatterOut(out *Tensor, src *Tensor, dim int64, index int lib.AtgSelectScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, dim, index) if err = TorchErr(); err != nil { + err = fmt.Errorf("SelectScatterOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SelectScatterOut") return retVal, err } @@ -33497,9 +35437,10 @@ func(ts *Tensor) Selu(del bool)(retVal *Tensor, err error) { lib.AtgSelu(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Selu() failed: %w", err) return retVal, err } - retVal = 
&Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Selu") return retVal, err } @@ -33512,6 +35453,7 @@ func(ts *Tensor) Selu_()(err error) { lib.AtgSelu_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Selu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33528,9 +35470,10 @@ func(ts *Tensor) Set(del bool)(retVal *Tensor, err error) { lib.AtgSet(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Set() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Set") return retVal, err } @@ -33543,6 +35486,7 @@ func(ts *Tensor) Set_()(err error) { lib.AtgSet_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Set_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33559,9 +35503,10 @@ func(ts *Tensor) SetOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSetOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SetOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SetOut") return retVal, err } @@ -33577,9 +35522,10 @@ func(ts *Tensor) SetRequiresGrad(r bool, del bool)(retVal *Tensor, err error) { if r { cr = int32(1) } lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr) if err = TorchErr(); err != nil { + err = fmt.Errorf("SetRequiresGrad() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SetRequiresGrad") return retVal, err } @@ -33593,9 +35539,10 @@ func(ts *Tensor) SetSourceTensor(source *Tensor, del bool)(retVal *Tensor, err e lib.AtgSetSourceTensor(ptr, ts.ctensor, source.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SetSourceTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SetSourceTensor") return retVal, err } @@ -33608,6 +35555,7 @@ func(ts *Tensor) SetSourceTensor_(source *Tensor)(err error) { lib.AtgSetSourceTensor_(ptr, ts.ctensor, source.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SetSourceTensor_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33624,9 +35572,10 @@ func(ts *Tensor) SetSourceTensorOut(out *Tensor, source *Tensor, del bool)(retVa lib.AtgSetSourceTensorOut(ptr, out.ctensor, ts.ctensor, source.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SetSourceTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SetSourceTensorOut") return retVal, err } @@ -33641,6 +35590,7 @@ func(ts *Tensor) SetSourceTensorStorageOffset_(source *Tensor, storageOffset int strideLen := len(stride) lib.AtgSetSourceTensorStorageOffset_(ptr, ts.ctensor, source.ctensor, storageOffset, size, sizeLen, stride, strideLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SetSourceTensorStorageOffset_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33657,9 +35607,10 @@ func(ts *Tensor) Sgn(del bool)(retVal *Tensor, err error) { lib.AtgSgn(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sgn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Sgn") return retVal, err } @@ -33672,6 +35623,7 @@ func(ts *Tensor) Sgn_()(err error) { lib.AtgSgn_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sgn_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33688,9 +35640,10 @@ func(ts *Tensor) SgnOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSgnOut(ptr, 
out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SgnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SgnOut") return retVal, err } @@ -33704,9 +35657,10 @@ func(ts *Tensor) Sigmoid(del bool)(retVal *Tensor, err error) { lib.AtgSigmoid(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sigmoid() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Sigmoid") return retVal, err } @@ -33719,6 +35673,7 @@ func(ts *Tensor) Sigmoid_()(err error) { lib.AtgSigmoid_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sigmoid_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33734,9 +35689,10 @@ func SigmoidBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor, err err lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SigmoidBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SigmoidBackward") return retVal, err } @@ -33749,9 +35705,10 @@ func SigmoidBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Ten lib.AtgSigmoidBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SigmoidBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SigmoidBackwardGradInput") return retVal, err } @@ -33765,9 +35722,10 @@ func(ts *Tensor) SigmoidOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SigmoidOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SigmoidOut") return retVal, err } @@ -33781,9 +35739,10 @@ func(ts *Tensor) Sign(del bool)(retVal *Tensor, err error) { lib.AtgSign(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sign() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Sign") return retVal, err } @@ -33796,6 +35755,7 @@ func(ts *Tensor) Sign_()(err error) { lib.AtgSign_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sign_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33812,9 +35772,10 @@ func(ts *Tensor) SignOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSignOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SignOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SignOut") return retVal, err } @@ -33828,9 +35789,10 @@ func(ts *Tensor) Signbit(del bool)(retVal *Tensor, err error) { lib.AtgSignbit(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Signbit() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Signbit") return retVal, err } @@ -33844,9 +35806,10 @@ func(ts *Tensor) SignbitOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSignbitOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SignbitOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SignbitOut") return retVal, err } @@ -33860,9 +35823,10 @@ func(ts *Tensor) Silu(del bool)(retVal *Tensor, err error) { lib.AtgSilu(ptr, ts.ctensor) if 
err = TorchErr(); err != nil { + err = fmt.Errorf("Silu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Silu") return retVal, err } @@ -33875,6 +35839,7 @@ func(ts *Tensor) Silu_()(err error) { lib.AtgSilu_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Silu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33891,9 +35856,10 @@ func(ts *Tensor) SiluBackward(gradOutput *Tensor, del bool)(retVal *Tensor, err lib.AtgSiluBackward(ptr, gradOutput.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SiluBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SiluBackward") return retVal, err } @@ -33907,9 +35873,10 @@ func(ts *Tensor) SiluBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, de lib.AtgSiluBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SiluBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SiluBackwardGradInput") return retVal, err } @@ -33923,9 +35890,10 @@ func(ts *Tensor) SiluOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSiluOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SiluOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SiluOut") return retVal, err } @@ -33939,9 +35907,10 @@ func(ts *Tensor) Sin(del bool)(retVal *Tensor, err error) { lib.AtgSin(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sin() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Sin") return retVal, err } @@ -33954,6 +35923,7 @@ func(ts *Tensor) Sin_()(err error) { lib.AtgSin_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sin_() failed: %w", err) return err } ts.ctensor = *ptr @@ -33970,9 +35940,10 @@ func(ts *Tensor) SinOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSinOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SinOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SinOut") return retVal, err } @@ -33986,9 +35957,10 @@ func(ts *Tensor) Sinc(del bool)(retVal *Tensor, err error) { lib.AtgSinc(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sinc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Sinc") return retVal, err } @@ -34001,6 +35973,7 @@ func(ts *Tensor) Sinc_()(err error) { lib.AtgSinc_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sinc_() failed: %w", err) return err } ts.ctensor = *ptr @@ -34017,9 +35990,10 @@ func(ts *Tensor) SincOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSincOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SincOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SincOut") return retVal, err } @@ -34033,9 +36007,10 @@ func(ts *Tensor) Sinh(del bool)(retVal *Tensor, err error) { lib.AtgSinh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sinh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Sinh") return retVal, err } @@ -34048,6 +36023,7 @@ func(ts *Tensor) 
Sinh_()(err error) { lib.AtgSinh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sinh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -34064,9 +36040,10 @@ func(ts *Tensor) SinhOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SinhOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SinhOut") return retVal, err } @@ -34092,9 +36069,10 @@ var cendVal int64 = 0 } lib.AtgSlice(ptr, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("Slice() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Slice") return retVal, err } @@ -34108,9 +36086,10 @@ func SliceBackward(gradOutput *Tensor, inputSizes []int64, dim int64, start int6 inputSizesLen := len(inputSizes) lib.AtgSliceBackward(ptr, gradOutput.ctensor, inputSizes, inputSizesLen, dim, start, end, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("SliceBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SliceBackward") return retVal, err } @@ -34124,9 +36103,10 @@ func SliceBackwardOut(out *Tensor, gradOutput *Tensor, inputSizes []int64, dim i inputSizesLen := len(inputSizes) lib.AtgSliceBackwardOut(ptr, out.ctensor, gradOutput.ctensor, inputSizes, inputSizesLen, dim, start, end, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("SliceBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SliceBackwardOut") return retVal, err } @@ -34152,9 +36132,10 @@ var cendVal int64 = 0 } lib.AtgSliceCopy(ptr, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("SliceCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SliceCopy") return retVal, err } @@ -34180,9 +36161,10 @@ var cendVal int64 = 0 } lib.AtgSliceCopyTensorOut(ptr, out.ctensor, ts.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("SliceCopyTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SliceCopyTensorOut") return retVal, err } @@ -34208,9 +36190,10 @@ var cendVal int64 = 0 } lib.AtgSliceScatter(ptr, ts.ctensor, src.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("SliceScatter() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SliceScatter") return retVal, err } @@ -34236,9 +36219,10 @@ var cendVal int64 = 0 } lib.AtgSliceScatterOut(ptr, out.ctensor, ts.ctensor, src.ctensor, dim, cstartVal, cstartNull, cendVal, cendNull, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("SliceScatterOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SliceScatterOut") return retVal, err } @@ -34252,10 +36236,11 @@ func(ts *Tensor) Slogdet(del bool)(retVal0 *Tensor, retVal1 *Tensor, err error) lib.AtgSlogdet(ctensorPtr0, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Slogdet() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = 
newTensor(*ctensorPtr0, "Slogdet_0") + retVal1 = newTensor(*ctensorPtr1, "Slogdet_1") return retVal0, retVal1, err } @@ -34269,10 +36254,11 @@ func(ts *Tensor) SlogdetOut(sign *Tensor, logabsdet *Tensor, del bool)(retVal0 * lib.AtgSlogdetOut(ctensorPtr0, sign.ctensor, logabsdet.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlogdetOut() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "SlogdetOut_0") + retVal1 = newTensor(*ctensorPtr1, "SlogdetOut_1") return retVal0, retVal1, err } @@ -34289,9 +36275,10 @@ strideLen := len(stride) paddingLen := len(padding) lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConv3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConv3d") return retVal, err } @@ -34308,9 +36295,10 @@ strideLen := len(stride) paddingLen := len(padding) lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConv3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConv3dOut") return retVal, err } @@ -34328,9 +36316,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvDilated2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvDilated2d") return retVal, err } @@ -34348,9 +36337,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgSlowConvDilated2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvDilated2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvDilated2dOut") return retVal, err } @@ -34368,9 +36358,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvDilated3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvDilated3d") return retVal, err } @@ -34388,9 +36379,10 @@ paddingLen := len(padding) dilationLen := len(dilation) lib.AtgSlowConvDilated3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvDilated3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvDilated3dOut") return retVal, err } @@ -34409,9 +36401,10 @@ outputPaddingLen := len(outputPadding) dilationLen := len(dilation) lib.AtgSlowConvTranspose2d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, 
stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvTranspose2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvTranspose2d") return retVal, err } @@ -34430,9 +36423,10 @@ outputPaddingLen := len(outputPadding) dilationLen := len(dilation) lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvTranspose2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvTranspose2dOut") return retVal, err } @@ -34451,9 +36445,10 @@ outputPaddingLen := len(outputPadding) dilationLen := len(dilation) lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvTranspose3d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvTranspose3d") return retVal, err } @@ -34472,9 +36467,10 @@ outputPaddingLen := len(outputPadding) dilationLen := len(dilation) lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, kernelSizeLen, bias.ctensor, stride, strideLen, padding, paddingLen, outputPadding, outputPaddingLen, dilation, dilationLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SlowConvTranspose3dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SlowConvTranspose3dOut") return retVal, err } @@ -34488,9 +36484,10 @@ func(ts *Tensor) Smm(mat2 *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Smm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Smm") return retVal, err } @@ -34504,9 +36501,10 @@ func(ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, beta float64, del lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { + err = fmt.Errorf("SmoothL1Loss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SmoothL1Loss") return retVal, err } @@ -34520,9 +36518,10 @@ func(ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduct lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { + err = fmt.Errorf("SmoothL1LossBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SmoothL1LossBackward") return retVal, err } @@ -34536,9 +36535,10 @@ func(ts *Tensor) SmoothL1LossBackwardGradInput(gradInput *Tensor, gradOutput *Te lib.AtgSmoothL1LossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { + err = fmt.Errorf("SmoothL1LossBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SmoothL1LossBackwardGradInput") return retVal, err } @@ -34552,9 +36552,10 @@ func(ts *Tensor) 
SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, b lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { + err = fmt.Errorf("SmoothL1LossOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SmoothL1LossOut") return retVal, err } @@ -34568,9 +36569,10 @@ func(ts *Tensor) SoftMarginLoss(target *Tensor, reduction int64, del bool)(retVa lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftMarginLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftMarginLoss") return retVal, err } @@ -34584,9 +36586,10 @@ func(ts *Tensor) SoftMarginLossBackward(gradOutput *Tensor, target *Tensor, redu lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftMarginLossBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftMarginLossBackward") return retVal, err } @@ -34600,9 +36603,10 @@ func(ts *Tensor) SoftMarginLossBackwardGradInput(gradInput *Tensor, gradOutput * lib.AtgSoftMarginLossBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftMarginLossBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftMarginLossBackwardGradInput") return retVal, err } @@ -34616,9 +36620,10 @@ func(ts *Tensor) SoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftMarginLossOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftMarginLossOut") return retVal, err } @@ -34632,9 +36637,10 @@ func(ts *Tensor) Softmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor, lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("Softmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Softmax") return retVal, err } @@ -34648,9 +36654,10 @@ func(ts *Tensor) SoftmaxIntOut(out *Tensor, dim int64, dtype gotch.DType, del bo lib.AtgSoftmaxIntOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftmaxIntOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftmaxIntOut") return retVal, err } @@ -34664,9 +36671,10 @@ func(ts *Tensor) Softplus(del bool)(retVal *Tensor, err error) { lib.AtgSoftplus(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Softplus() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Softplus") return retVal, err } @@ -34680,9 +36688,10 @@ func(ts *Tensor) SoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *S lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftplusBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftplusBackward") return retVal, err } @@ -34696,9 +36705,10 @@ func(ts 
*Tensor) SoftplusBackwardGradInput(gradInput *Tensor, gradOutput *Tensor lib.AtgSoftplusBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftplusBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftplusBackwardGradInput") return retVal, err } @@ -34712,9 +36722,10 @@ func(ts *Tensor) SoftplusOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftplusOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftplusOut") return retVal, err } @@ -34728,9 +36739,10 @@ func(ts *Tensor) Softshrink(del bool)(retVal *Tensor, err error) { lib.AtgSoftshrink(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Softshrink() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Softshrink") return retVal, err } @@ -34744,9 +36756,10 @@ func(ts *Tensor) SoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool) lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftshrinkBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftshrinkBackward") return retVal, err } @@ -34760,9 +36773,10 @@ func(ts *Tensor) SoftshrinkBackwardGradInput(gradInput *Tensor, gradOutput *Tens lib.AtgSoftshrinkBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftshrinkBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftshrinkBackwardGradInput") return retVal, err } @@ -34776,9 +36790,10 @@ func(ts *Tensor) SoftshrinkOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SoftshrinkOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SoftshrinkOut") return retVal, err } @@ -34794,10 +36809,11 @@ func(ts *Tensor) Sort(dim int64, descending bool, del bool)(retVal0 *Tensor, ret if descending { cdescending = int32(1) } lib.AtgSort(ctensorPtr0, ts.ctensor, dim, cdescending) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sort() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "Sort_0") + retVal1 = newTensor(*ctensorPtr1, "Sort_1") return retVal0, retVal1, err } @@ -34815,10 +36831,11 @@ cdescending := int32(0) if descending { cdescending = int32(1) } lib.AtgSortStable(ctensorPtr0, ts.ctensor, cstable, dim, cdescending) if err = TorchErr(); err != nil { + err = fmt.Errorf("SortStable() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "SortStable_0") + retVal1 = newTensor(*ctensorPtr1, "SortStable_1") return retVal0, retVal1, err } @@ -34834,10 +36851,11 @@ func(ts *Tensor) SortValues(values *Tensor, indices *Tensor, dim int64, descendi if descending { cdescending = int32(1) } lib.AtgSortValues(ctensorPtr0, values.ctensor, 
indices.ctensor, ts.ctensor, dim, cdescending) if err = TorchErr(); err != nil { + err = fmt.Errorf("SortValues() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "SortValues_0") + retVal1 = newTensor(*ctensorPtr1, "SortValues_1") return retVal0, retVal1, err } @@ -34855,10 +36873,11 @@ cdescending := int32(0) if descending { cdescending = int32(1) } lib.AtgSortValuesStable(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, cstable, dim, cdescending) if err = TorchErr(); err != nil { + err = fmt.Errorf("SortValuesStable() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "SortValuesStable_0") + retVal1 = newTensor(*ctensorPtr1, "SortValuesStable_1") return retVal0, retVal1, err } @@ -34871,9 +36890,10 @@ func SparseBscTensor(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, op lib.AtgSparseBscTensor(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseBscTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseBscTensor") return retVal, err } @@ -34887,9 +36907,10 @@ func SparseBscTensorCcolRowValueSize(ccolIndices *Tensor, rowIndices *Tensor, va sizeLen := len(size) lib.AtgSparseBscTensorCcolRowValueSize(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseBscTensorCcolRowValueSize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseBscTensorCcolRowValueSize") return retVal, err } @@ -34902,9 +36923,10 @@ func SparseBsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, op lib.AtgSparseBsrTensor(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseBsrTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseBsrTensor") return retVal, err } @@ -34918,9 +36940,10 @@ func SparseBsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, va sizeLen := len(size) lib.AtgSparseBsrTensorCrowColValueSize(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseBsrTensorCrowColValueSize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseBsrTensorCrowColValueSize") return retVal, err } @@ -34933,9 +36956,10 @@ func SparseCompressedTensor(compressedIndices *Tensor, plainIndices *Tensor, val lib.AtgSparseCompressedTensor(ptr, compressedIndices.ctensor, plainIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCompressedTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCompressedTensor") return retVal, err } @@ -34949,9 +36973,10 @@ func SparseCompressedTensorCompPlainValueSize(compressedIndices *Tensor, plainIn sizeLen := len(size) lib.AtgSparseCompressedTensorCompPlainValueSize(ptr, 
compressedIndices.ctensor, plainIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCompressedTensorCompPlainValueSize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCompressedTensorCompPlainValueSize") return retVal, err } @@ -34965,9 +36990,10 @@ func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch. sizeLen := len(size) lib.AtgSparseCooTensor(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCooTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCooTensor") return retVal, err } @@ -34980,9 +37006,10 @@ func SparseCooTensorIndices(indices *Tensor, values *Tensor, optionsKind gotch.D lib.AtgSparseCooTensorIndices(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCooTensorIndices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCooTensorIndices") return retVal, err } @@ -34996,9 +37023,10 @@ func SparseCooTensorIndicesSize(indices *Tensor, values *Tensor, size []int64, o sizeLen := len(size) lib.AtgSparseCooTensorIndicesSize(ptr, indices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCooTensorIndicesSize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCooTensorIndicesSize") return retVal, err } @@ -35012,9 +37040,10 @@ func SparseCooTensorSizeOut(out *Tensor, size []int64)(retVal *Tensor, err error sizeLen := len(size) lib.AtgSparseCooTensorSizeOut(ptr, out.ctensor, size, sizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCooTensorSizeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCooTensorSizeOut") return retVal, err } @@ -35027,9 +37056,10 @@ func SparseCscTensor(ccolIndices *Tensor, rowIndices *Tensor, values *Tensor, op lib.AtgSparseCscTensor(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCscTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCscTensor") return retVal, err } @@ -35043,9 +37073,10 @@ func SparseCscTensorCcolRowValueSize(ccolIndices *Tensor, rowIndices *Tensor, va sizeLen := len(size) lib.AtgSparseCscTensorCcolRowValueSize(ptr, ccolIndices.ctensor, rowIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCscTensorCcolRowValueSize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCscTensorCcolRowValueSize") return retVal, err } @@ -35058,9 +37089,10 @@ func SparseCsrTensor(crowIndices *Tensor, colIndices *Tensor, values *Tensor, op lib.AtgSparseCsrTensor(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCsrTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCsrTensor") 
return retVal, err } @@ -35074,9 +37106,10 @@ func SparseCsrTensorCrowColValueSize(crowIndices *Tensor, colIndices *Tensor, va sizeLen := len(size) lib.AtgSparseCsrTensorCrowColValueSize(ptr, crowIndices.ctensor, colIndices.ctensor, values.ctensor, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseCsrTensorCrowColValueSize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseCsrTensorCrowColValueSize") return retVal, err } @@ -35088,6 +37121,7 @@ func(ts *Tensor) SparseDim(del bool)(retVal int64, err error) { retVal = lib.AtgSparseDim(ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseDim() failed: %w", err) return retVal, err } return retVal, err @@ -35102,9 +37136,10 @@ func(ts *Tensor) SparseMask(mask *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseMask() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseMask") return retVal, err } @@ -35118,9 +37153,10 @@ func(ts *Tensor) SparseMaskOut(out *Tensor, mask *Tensor, del bool)(retVal *Tens lib.AtgSparseMaskOut(ptr, out.ctensor, ts.ctensor, mask.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseMaskOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseMaskOut") return retVal, err } @@ -35135,9 +37171,10 @@ func(ts *Tensor) SparseResize(size []int64, sparseDim int64, denseDim int64, del sizeLen := len(size) lib.AtgSparseResize(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseResize() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseResize") return retVal, err } @@ -35151,6 +37188,7 @@ func(ts *Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64)(er sizeLen := len(size) lib.AtgSparseResize_(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseResize_() failed: %w", err) return err } ts.ctensor = *ptr @@ -35168,9 +37206,10 @@ func(ts *Tensor) SparseResizeAndClear(size []int64, sparseDim int64, denseDim in sizeLen := len(size) lib.AtgSparseResizeAndClear(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseResizeAndClear() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseResizeAndClear") return retVal, err } @@ -35184,6 +37223,7 @@ func(ts *Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim i sizeLen := len(size) lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, sizeLen, sparseDim, denseDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseResizeAndClear_() failed: %w", err) return err } ts.ctensor = *ptr @@ -35201,9 +37241,10 @@ func(ts *Tensor) SparseResizeAndClearOut(out *Tensor, size []int64, sparseDim in sizeLen := len(size) lib.AtgSparseResizeAndClearOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, sparseDim, denseDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseResizeAndClearOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseResizeAndClearOut") return retVal, err } @@ -35218,9 +37259,10 @@ func(ts *Tensor) SparseResizeOut(out *Tensor, size 
[]int64, sparseDim int64, den sizeLen := len(size) lib.AtgSparseResizeOut(ptr, out.ctensor, ts.ctensor, size, sizeLen, sparseDim, denseDim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseResizeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseResizeOut") return retVal, err } @@ -35234,9 +37276,10 @@ func(ts *Tensor) SparseSampledAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal lib.AtgSparseSampledAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseSampledAddmm() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseSampledAddmm") return retVal, err } @@ -35250,9 +37293,10 @@ func(ts *Tensor) SparseSampledAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, lib.AtgSparseSampledAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SparseSampledAddmmOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SparseSampledAddmmOut") return retVal, err } @@ -35265,9 +37309,10 @@ func SpecialAiryAi(x *Tensor)(retVal *Tensor, err error) { lib.AtgSpecialAiryAi(ptr, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialAiryAi() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialAiryAi") return retVal, err } @@ -35280,9 +37325,10 @@ func SpecialAiryAiOut(out *Tensor, x *Tensor)(retVal *Tensor, err error) { lib.AtgSpecialAiryAiOut(ptr, out.ctensor, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialAiryAiOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialAiryAiOut") return retVal, err } @@ -35296,9 +37342,10 @@ func(ts *Tensor) SpecialBesselJ0(del bool)(retVal *Tensor, err error) { lib.AtgSpecialBesselJ0(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselJ0() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselJ0") return retVal, err } @@ -35312,9 +37359,10 @@ func(ts *Tensor) SpecialBesselJ0Out(out *Tensor, del bool)(retVal *Tensor, err e lib.AtgSpecialBesselJ0Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselJ0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselJ0Out") return retVal, err } @@ -35328,9 +37376,10 @@ func(ts *Tensor) SpecialBesselJ1(del bool)(retVal *Tensor, err error) { lib.AtgSpecialBesselJ1(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselJ1() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselJ1") return retVal, err } @@ -35344,9 +37393,10 @@ func(ts *Tensor) SpecialBesselJ1Out(out *Tensor, del bool)(retVal *Tensor, err e lib.AtgSpecialBesselJ1Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselJ1Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselJ1Out") return retVal, err } @@ -35360,9 +37410,10 @@ func(ts *Tensor) SpecialBesselY0(del bool)(retVal *Tensor, err error) { lib.AtgSpecialBesselY0(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselY0() failed: %w", err) return 
retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselY0") return retVal, err } @@ -35376,9 +37427,10 @@ func(ts *Tensor) SpecialBesselY0Out(out *Tensor, del bool)(retVal *Tensor, err e lib.AtgSpecialBesselY0Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselY0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselY0Out") return retVal, err } @@ -35392,9 +37444,10 @@ func(ts *Tensor) SpecialBesselY1(del bool)(retVal *Tensor, err error) { lib.AtgSpecialBesselY1(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselY1() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselY1") return retVal, err } @@ -35408,9 +37461,10 @@ func(ts *Tensor) SpecialBesselY1Out(out *Tensor, del bool)(retVal *Tensor, err e lib.AtgSpecialBesselY1Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialBesselY1Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialBesselY1Out") return retVal, err } @@ -35423,9 +37477,10 @@ func SpecialChebyshevPolynomialT(x *Tensor, n *Tensor)(retVal *Tensor, err error lib.AtgSpecialChebyshevPolynomialT(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialT() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialT") return retVal, err } @@ -35438,9 +37493,10 @@ func SpecialChebyshevPolynomialTNScalar(x *Tensor, n *Scalar)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialTNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialTNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialTNScalar") return retVal, err } @@ -35453,9 +37509,10 @@ func SpecialChebyshevPolynomialTNScalarOut(out *Tensor, x *Tensor, n *Scalar)(re lib.AtgSpecialChebyshevPolynomialTNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialTNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialTNScalarOut") return retVal, err } @@ -35468,9 +37525,10 @@ func SpecialChebyshevPolynomialTOut(out *Tensor, x *Tensor, n *Tensor)(retVal *T lib.AtgSpecialChebyshevPolynomialTOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialTOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialTOut") return retVal, err } @@ -35483,9 +37541,10 @@ func SpecialChebyshevPolynomialTXScalar(x *Scalar, n *Tensor)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialTXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialTXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialTXScalar") return retVal, err } @@ -35498,9 +37557,10 @@ func SpecialChebyshevPolynomialTXScalarOut(out *Tensor, x *Scalar, n *Tensor)(re lib.AtgSpecialChebyshevPolynomialTXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialTXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialTXScalarOut") return retVal, err } @@ -35513,9 +37573,10 @@ func SpecialChebyshevPolynomialU(x *Tensor, n *Tensor)(retVal *Tensor, err error lib.AtgSpecialChebyshevPolynomialU(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialU() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialU") return retVal, err } @@ -35528,9 +37589,10 @@ func SpecialChebyshevPolynomialUNScalar(x *Tensor, n *Scalar)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialUNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialUNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialUNScalar") return retVal, err } @@ -35543,9 +37605,10 @@ func SpecialChebyshevPolynomialUNScalarOut(out *Tensor, x *Tensor, n *Scalar)(re lib.AtgSpecialChebyshevPolynomialUNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialUNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialUNScalarOut") return retVal, err } @@ -35558,9 +37621,10 @@ func SpecialChebyshevPolynomialUOut(out *Tensor, x *Tensor, n *Tensor)(retVal *T lib.AtgSpecialChebyshevPolynomialUOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialUOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialUOut") return retVal, err } @@ -35573,9 +37637,10 @@ func SpecialChebyshevPolynomialUXScalar(x *Scalar, n *Tensor)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialUXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialUXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialUXScalar") return retVal, err } @@ -35588,9 +37653,10 @@ func SpecialChebyshevPolynomialUXScalarOut(out *Tensor, x *Scalar, n *Tensor)(re lib.AtgSpecialChebyshevPolynomialUXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialUXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialUXScalarOut") return retVal, err } @@ -35603,9 +37669,10 @@ func SpecialChebyshevPolynomialV(x *Tensor, n *Tensor)(retVal *Tensor, err error lib.AtgSpecialChebyshevPolynomialV(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialV() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialV") return retVal, err } @@ -35618,9 +37685,10 @@ func SpecialChebyshevPolynomialVNScalar(x *Tensor, n *Scalar)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialVNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialVNScalar() failed: %w", err) return retVal, err } - retVal = 
&Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialVNScalar") return retVal, err } @@ -35633,9 +37701,10 @@ func SpecialChebyshevPolynomialVNScalarOut(out *Tensor, x *Tensor, n *Scalar)(re lib.AtgSpecialChebyshevPolynomialVNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialVNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialVNScalarOut") return retVal, err } @@ -35648,9 +37717,10 @@ func SpecialChebyshevPolynomialVOut(out *Tensor, x *Tensor, n *Tensor)(retVal *T lib.AtgSpecialChebyshevPolynomialVOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialVOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialVOut") return retVal, err } @@ -35663,9 +37733,10 @@ func SpecialChebyshevPolynomialVXScalar(x *Scalar, n *Tensor)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialVXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialVXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialVXScalar") return retVal, err } @@ -35678,9 +37749,10 @@ func SpecialChebyshevPolynomialVXScalarOut(out *Tensor, x *Scalar, n *Tensor)(re lib.AtgSpecialChebyshevPolynomialVXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialVXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialVXScalarOut") return retVal, err } @@ -35693,9 +37765,10 @@ func SpecialChebyshevPolynomialW(x *Tensor, n *Tensor)(retVal *Tensor, err error lib.AtgSpecialChebyshevPolynomialW(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialW() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialW") return retVal, err } @@ -35708,9 +37781,10 @@ func SpecialChebyshevPolynomialWNScalar(x *Tensor, n *Scalar)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialWNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialWNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialWNScalar") return retVal, err } @@ -35723,9 +37797,10 @@ func SpecialChebyshevPolynomialWNScalarOut(out *Tensor, x *Tensor, n *Scalar)(re lib.AtgSpecialChebyshevPolynomialWNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialWNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialWNScalarOut") return retVal, err } @@ -35738,9 +37813,10 @@ func SpecialChebyshevPolynomialWOut(out *Tensor, x *Tensor, n *Tensor)(retVal *T lib.AtgSpecialChebyshevPolynomialWOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialWOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialWOut") return retVal, err 
} @@ -35753,9 +37829,10 @@ func SpecialChebyshevPolynomialWXScalar(x *Scalar, n *Tensor)(retVal *Tensor, er lib.AtgSpecialChebyshevPolynomialWXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialWXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialWXScalar") return retVal, err } @@ -35768,9 +37845,10 @@ func SpecialChebyshevPolynomialWXScalarOut(out *Tensor, x *Scalar, n *Tensor)(re lib.AtgSpecialChebyshevPolynomialWXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialChebyshevPolynomialWXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialChebyshevPolynomialWXScalarOut") return retVal, err } @@ -35784,9 +37862,10 @@ func(ts *Tensor) SpecialDigamma(del bool)(retVal *Tensor, err error) { lib.AtgSpecialDigamma(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialDigamma() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialDigamma") return retVal, err } @@ -35800,9 +37879,10 @@ func(ts *Tensor) SpecialDigammaOut(out *Tensor, del bool)(retVal *Tensor, err er lib.AtgSpecialDigammaOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialDigammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialDigammaOut") return retVal, err } @@ -35816,9 +37896,10 @@ func(ts *Tensor) SpecialEntr(del bool)(retVal *Tensor, err error) { lib.AtgSpecialEntr(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialEntr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialEntr") return retVal, err } @@ -35832,9 +37913,10 @@ func(ts *Tensor) SpecialEntrOut(out *Tensor, del bool)(retVal *Tensor, err error lib.AtgSpecialEntrOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialEntrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialEntrOut") return retVal, err } @@ -35848,9 +37930,10 @@ func(ts *Tensor) SpecialErf(del bool)(retVal *Tensor, err error) { lib.AtgSpecialErf(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErf() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErf") return retVal, err } @@ -35864,9 +37947,10 @@ func(ts *Tensor) SpecialErfOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSpecialErfOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErfOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErfOut") return retVal, err } @@ -35880,9 +37964,10 @@ func(ts *Tensor) SpecialErfc(del bool)(retVal *Tensor, err error) { lib.AtgSpecialErfc(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErfc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErfc") return retVal, err } @@ -35896,9 +37981,10 @@ func(ts *Tensor) SpecialErfcOut(out *Tensor, del bool)(retVal *Tensor, err error lib.AtgSpecialErfcOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErfcOut() 
failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErfcOut") return retVal, err } @@ -35912,9 +37998,10 @@ func(ts *Tensor) SpecialErfcx(del bool)(retVal *Tensor, err error) { lib.AtgSpecialErfcx(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErfcx() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErfcx") return retVal, err } @@ -35928,9 +38015,10 @@ func(ts *Tensor) SpecialErfcxOut(out *Tensor, del bool)(retVal *Tensor, err erro lib.AtgSpecialErfcxOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErfcxOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErfcxOut") return retVal, err } @@ -35944,9 +38032,10 @@ func(ts *Tensor) SpecialErfinv(del bool)(retVal *Tensor, err error) { lib.AtgSpecialErfinv(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErfinv() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErfinv") return retVal, err } @@ -35960,9 +38049,10 @@ func(ts *Tensor) SpecialErfinvOut(out *Tensor, del bool)(retVal *Tensor, err err lib.AtgSpecialErfinvOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialErfinvOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialErfinvOut") return retVal, err } @@ -35976,9 +38066,10 @@ func(ts *Tensor) SpecialExp2(del bool)(retVal *Tensor, err error) { lib.AtgSpecialExp2(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialExp2() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialExp2") return retVal, err } @@ -35992,9 +38083,10 @@ func(ts *Tensor) SpecialExp2Out(out *Tensor, del bool)(retVal *Tensor, err error lib.AtgSpecialExp2Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialExp2Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialExp2Out") return retVal, err } @@ -36008,9 +38100,10 @@ func(ts *Tensor) SpecialExpit(del bool)(retVal *Tensor, err error) { lib.AtgSpecialExpit(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialExpit() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialExpit") return retVal, err } @@ -36024,9 +38117,10 @@ func(ts *Tensor) SpecialExpitOut(out *Tensor, del bool)(retVal *Tensor, err erro lib.AtgSpecialExpitOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialExpitOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialExpitOut") return retVal, err } @@ -36040,9 +38134,10 @@ func(ts *Tensor) SpecialExpm1(del bool)(retVal *Tensor, err error) { lib.AtgSpecialExpm1(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialExpm1() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialExpm1") return retVal, err } @@ -36056,9 +38151,10 @@ func(ts *Tensor) SpecialExpm1Out(out *Tensor, del bool)(retVal *Tensor, err erro lib.AtgSpecialExpm1Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialExpm1Out() failed: %w", 
err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialExpm1Out") return retVal, err } @@ -36072,9 +38168,10 @@ func(ts *Tensor) SpecialGammainc(other *Tensor, del bool)(retVal *Tensor, err er lib.AtgSpecialGammainc(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialGammainc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialGammainc") return retVal, err } @@ -36088,9 +38185,10 @@ func(ts *Tensor) SpecialGammaincOut(out *Tensor, other *Tensor, del bool)(retVal lib.AtgSpecialGammaincOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialGammaincOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialGammaincOut") return retVal, err } @@ -36104,9 +38202,10 @@ func(ts *Tensor) SpecialGammaincc(other *Tensor, del bool)(retVal *Tensor, err e lib.AtgSpecialGammaincc(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialGammaincc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialGammaincc") return retVal, err } @@ -36120,9 +38219,10 @@ func(ts *Tensor) SpecialGammainccOut(out *Tensor, other *Tensor, del bool)(retVa lib.AtgSpecialGammainccOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialGammainccOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialGammainccOut") return retVal, err } @@ -36136,9 +38236,10 @@ func(ts *Tensor) SpecialGammaln(del bool)(retVal *Tensor, err error) { lib.AtgSpecialGammaln(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialGammaln() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialGammaln") return retVal, err } @@ -36152,9 +38253,10 @@ func(ts *Tensor) SpecialGammalnOut(out *Tensor, del bool)(retVal *Tensor, err er lib.AtgSpecialGammalnOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialGammalnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialGammalnOut") return retVal, err } @@ -36167,9 +38269,10 @@ func SpecialHermitePolynomialH(x *Tensor, n *Tensor)(retVal *Tensor, err error) lib.AtgSpecialHermitePolynomialH(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialH() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialH") return retVal, err } @@ -36182,9 +38285,10 @@ func SpecialHermitePolynomialHNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err lib.AtgSpecialHermitePolynomialHNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHNScalar") return retVal, err } @@ -36197,9 +38301,10 @@ func SpecialHermitePolynomialHNScalarOut(out *Tensor, x *Tensor, n *Scalar)(retV lib.AtgSpecialHermitePolynomialHNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: 
*ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHNScalarOut") return retVal, err } @@ -36212,9 +38317,10 @@ func SpecialHermitePolynomialHOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Ten lib.AtgSpecialHermitePolynomialHOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHOut") return retVal, err } @@ -36227,9 +38333,10 @@ func SpecialHermitePolynomialHXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err lib.AtgSpecialHermitePolynomialHXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHXScalar") return retVal, err } @@ -36242,9 +38349,10 @@ func SpecialHermitePolynomialHXScalarOut(out *Tensor, x *Scalar, n *Tensor)(retV lib.AtgSpecialHermitePolynomialHXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHXScalarOut") return retVal, err } @@ -36257,9 +38365,10 @@ func SpecialHermitePolynomialHe(x *Tensor, n *Tensor)(retVal *Tensor, err error) lib.AtgSpecialHermitePolynomialHe(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHe() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHe") return retVal, err } @@ -36272,9 +38381,10 @@ func SpecialHermitePolynomialHeNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err lib.AtgSpecialHermitePolynomialHeNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHeNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHeNScalar") return retVal, err } @@ -36287,9 +38397,10 @@ func SpecialHermitePolynomialHeNScalarOut(out *Tensor, x *Tensor, n *Scalar)(ret lib.AtgSpecialHermitePolynomialHeNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHeNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHeNScalarOut") return retVal, err } @@ -36302,9 +38413,10 @@ func SpecialHermitePolynomialHeOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Te lib.AtgSpecialHermitePolynomialHeOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHeOut") return retVal, err } @@ -36317,9 +38429,10 @@ func SpecialHermitePolynomialHeXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err lib.AtgSpecialHermitePolynomialHeXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHeXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHeXScalar") return retVal, err } @@ -36332,9 +38445,10 @@ func SpecialHermitePolynomialHeXScalarOut(out 
*Tensor, x *Scalar, n *Tensor)(ret lib.AtgSpecialHermitePolynomialHeXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialHermitePolynomialHeXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialHermitePolynomialHeXScalarOut") return retVal, err } @@ -36348,9 +38462,10 @@ func(ts *Tensor) SpecialI0(del bool)(retVal *Tensor, err error) { lib.AtgSpecialI0(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI0() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI0") return retVal, err } @@ -36364,9 +38479,10 @@ func(ts *Tensor) SpecialI0Out(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSpecialI0Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI0Out") return retVal, err } @@ -36380,9 +38496,10 @@ func(ts *Tensor) SpecialI0e(del bool)(retVal *Tensor, err error) { lib.AtgSpecialI0e(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI0e() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI0e") return retVal, err } @@ -36396,9 +38513,10 @@ func(ts *Tensor) SpecialI0eOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSpecialI0eOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI0eOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI0eOut") return retVal, err } @@ -36412,9 +38530,10 @@ func(ts *Tensor) SpecialI1(del bool)(retVal *Tensor, err error) { lib.AtgSpecialI1(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI1() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI1") return retVal, err } @@ -36428,9 +38547,10 @@ func(ts *Tensor) SpecialI1Out(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSpecialI1Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI1Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI1Out") return retVal, err } @@ -36444,9 +38564,10 @@ func(ts *Tensor) SpecialI1e(del bool)(retVal *Tensor, err error) { lib.AtgSpecialI1e(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI1e() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI1e") return retVal, err } @@ -36460,9 +38581,10 @@ func(ts *Tensor) SpecialI1eOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSpecialI1eOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialI1eOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialI1eOut") return retVal, err } @@ -36475,9 +38597,10 @@ func SpecialLaguerrePolynomialL(x *Tensor, n *Tensor)(retVal *Tensor, err error) lib.AtgSpecialLaguerrePolynomialL(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLaguerrePolynomialL() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLaguerrePolynomialL") return retVal, err } @@ -36490,9 
+38613,10 @@ func SpecialLaguerrePolynomialLNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err lib.AtgSpecialLaguerrePolynomialLNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLaguerrePolynomialLNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLaguerrePolynomialLNScalar") return retVal, err } @@ -36505,9 +38629,10 @@ func SpecialLaguerrePolynomialLNScalarOut(out *Tensor, x *Tensor, n *Scalar)(ret lib.AtgSpecialLaguerrePolynomialLNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLaguerrePolynomialLNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLaguerrePolynomialLNScalarOut") return retVal, err } @@ -36520,9 +38645,10 @@ func SpecialLaguerrePolynomialLOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Te lib.AtgSpecialLaguerrePolynomialLOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLaguerrePolynomialLOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLaguerrePolynomialLOut") return retVal, err } @@ -36535,9 +38661,10 @@ func SpecialLaguerrePolynomialLXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err lib.AtgSpecialLaguerrePolynomialLXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLaguerrePolynomialLXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLaguerrePolynomialLXScalar") return retVal, err } @@ -36550,9 +38677,10 @@ func SpecialLaguerrePolynomialLXScalarOut(out *Tensor, x *Scalar, n *Tensor)(ret lib.AtgSpecialLaguerrePolynomialLXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLaguerrePolynomialLXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLaguerrePolynomialLXScalarOut") return retVal, err } @@ -36565,9 +38693,10 @@ func SpecialLegendrePolynomialP(x *Tensor, n *Tensor)(retVal *Tensor, err error) lib.AtgSpecialLegendrePolynomialP(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLegendrePolynomialP() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLegendrePolynomialP") return retVal, err } @@ -36580,9 +38709,10 @@ func SpecialLegendrePolynomialPNScalar(x *Tensor, n *Scalar)(retVal *Tensor, err lib.AtgSpecialLegendrePolynomialPNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLegendrePolynomialPNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLegendrePolynomialPNScalar") return retVal, err } @@ -36595,9 +38725,10 @@ func SpecialLegendrePolynomialPNScalarOut(out *Tensor, x *Tensor, n *Scalar)(ret lib.AtgSpecialLegendrePolynomialPNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLegendrePolynomialPNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLegendrePolynomialPNScalarOut") return retVal, err } @@ -36610,9 +38741,10 @@ func SpecialLegendrePolynomialPOut(out *Tensor, x *Tensor, n *Tensor)(retVal *Te 
lib.AtgSpecialLegendrePolynomialPOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLegendrePolynomialPOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLegendrePolynomialPOut") return retVal, err } @@ -36625,9 +38757,10 @@ func SpecialLegendrePolynomialPXScalar(x *Scalar, n *Tensor)(retVal *Tensor, err lib.AtgSpecialLegendrePolynomialPXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLegendrePolynomialPXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLegendrePolynomialPXScalar") return retVal, err } @@ -36640,9 +38773,10 @@ func SpecialLegendrePolynomialPXScalarOut(out *Tensor, x *Scalar, n *Tensor)(ret lib.AtgSpecialLegendrePolynomialPXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLegendrePolynomialPXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLegendrePolynomialPXScalarOut") return retVal, err } @@ -36656,9 +38790,10 @@ func(ts *Tensor) SpecialLog1p(del bool)(retVal *Tensor, err error) { lib.AtgSpecialLog1p(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLog1p() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLog1p") return retVal, err } @@ -36672,9 +38807,10 @@ func(ts *Tensor) SpecialLog1pOut(out *Tensor, del bool)(retVal *Tensor, err erro lib.AtgSpecialLog1pOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLog1pOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLog1pOut") return retVal, err } @@ -36688,9 +38824,10 @@ func(ts *Tensor) SpecialLogNdtr(del bool)(retVal *Tensor, err error) { lib.AtgSpecialLogNdtr(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLogNdtr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLogNdtr") return retVal, err } @@ -36704,9 +38841,10 @@ func(ts *Tensor) SpecialLogNdtrOut(out *Tensor, del bool)(retVal *Tensor, err er lib.AtgSpecialLogNdtrOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLogNdtrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLogNdtrOut") return retVal, err } @@ -36720,9 +38858,10 @@ func(ts *Tensor) SpecialLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVa lib.AtgSpecialLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLogSoftmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLogSoftmax") return retVal, err } @@ -36742,9 +38881,10 @@ func(ts *Tensor) SpecialLogit(eps []float64, del bool)(retVal *Tensor, err error } lib.AtgSpecialLogit(ptr, ts.ctensor, cepsVal, cepsNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLogit() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLogit") return retVal, err } @@ -36764,9 +38904,10 @@ func(ts *Tensor) SpecialLogitOut(out *Tensor, eps []float64, del bool)(retVal *T } lib.AtgSpecialLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull) if err = 
TorchErr(); err != nil { + err = fmt.Errorf("SpecialLogitOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLogitOut") return retVal, err } @@ -36783,9 +38924,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgSpecialLogsumexp(ptr, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLogsumexp() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLogsumexp") return retVal, err } @@ -36802,9 +38944,10 @@ ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } lib.AtgSpecialLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialLogsumexpOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialLogsumexpOut") return retVal, err } @@ -36818,9 +38961,10 @@ func(ts *Tensor) SpecialModifiedBesselI0(del bool)(retVal *Tensor, err error) { lib.AtgSpecialModifiedBesselI0(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselI0() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselI0") return retVal, err } @@ -36834,9 +38978,10 @@ func(ts *Tensor) SpecialModifiedBesselI0Out(out *Tensor, del bool)(retVal *Tenso lib.AtgSpecialModifiedBesselI0Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselI0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselI0Out") return retVal, err } @@ -36850,9 +38995,10 @@ func(ts *Tensor) SpecialModifiedBesselI1(del bool)(retVal *Tensor, err error) { lib.AtgSpecialModifiedBesselI1(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselI1() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselI1") return retVal, err } @@ -36866,9 +39012,10 @@ func(ts *Tensor) SpecialModifiedBesselI1Out(out *Tensor, del bool)(retVal *Tenso lib.AtgSpecialModifiedBesselI1Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselI1Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselI1Out") return retVal, err } @@ -36882,9 +39029,10 @@ func(ts *Tensor) SpecialModifiedBesselK0(del bool)(retVal *Tensor, err error) { lib.AtgSpecialModifiedBesselK0(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselK0() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselK0") return retVal, err } @@ -36898,9 +39046,10 @@ func(ts *Tensor) SpecialModifiedBesselK0Out(out *Tensor, del bool)(retVal *Tenso lib.AtgSpecialModifiedBesselK0Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselK0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselK0Out") return retVal, err } @@ -36914,9 +39063,10 @@ func(ts *Tensor) SpecialModifiedBesselK1(del bool)(retVal *Tensor, err error) { lib.AtgSpecialModifiedBesselK1(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselK1() failed: %w", err) 
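
Because the wrapping uses %w rather than flattening the cause into a plain string, callers can still match the underlying error with errors.Is or errors.As while the message gains the wrapper's name. A self-contained illustration — the sentinel error here is a stand-in, not the actual value TorchErr() returns:

    package main

    import (
        "errors"
        "fmt"
    )

    var errTorch = errors.New("Libtch API returned error")

    // wrapped mimics the shape of a generated wrapper's error path.
    func wrapped() error {
        return fmt.Errorf("SpecialModifiedBesselK1() failed: %w", errTorch)
    }

    func main() {
        err := wrapped()
        fmt.Println(err)                      // SpecialModifiedBesselK1() failed: Libtch API returned error
        fmt.Println(errors.Is(err, errTorch)) // true: %w keeps the error chain intact
    }
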
return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselK1") return retVal, err } @@ -36930,9 +39080,10 @@ func(ts *Tensor) SpecialModifiedBesselK1Out(out *Tensor, del bool)(retVal *Tenso lib.AtgSpecialModifiedBesselK1Out(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialModifiedBesselK1Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialModifiedBesselK1Out") return retVal, err } @@ -36946,9 +39097,10 @@ func(ts *Tensor) SpecialMultigammaln(p int64, del bool)(retVal *Tensor, err erro lib.AtgSpecialMultigammaln(ptr, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialMultigammaln() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialMultigammaln") return retVal, err } @@ -36962,9 +39114,10 @@ func(ts *Tensor) SpecialMultigammalnOut(out *Tensor, p int64, del bool)(retVal * lib.AtgSpecialMultigammalnOut(ptr, out.ctensor, ts.ctensor, p) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialMultigammalnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialMultigammalnOut") return retVal, err } @@ -36978,9 +39131,10 @@ func(ts *Tensor) SpecialNdtr(del bool)(retVal *Tensor, err error) { lib.AtgSpecialNdtr(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialNdtr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialNdtr") return retVal, err } @@ -36994,9 +39148,10 @@ func(ts *Tensor) SpecialNdtrOut(out *Tensor, del bool)(retVal *Tensor, err error lib.AtgSpecialNdtrOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialNdtrOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialNdtrOut") return retVal, err } @@ -37010,9 +39165,10 @@ func(ts *Tensor) SpecialNdtri(del bool)(retVal *Tensor, err error) { lib.AtgSpecialNdtri(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialNdtri() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialNdtri") return retVal, err } @@ -37026,9 +39182,10 @@ func(ts *Tensor) SpecialNdtriOut(out *Tensor, del bool)(retVal *Tensor, err erro lib.AtgSpecialNdtriOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialNdtriOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialNdtriOut") return retVal, err } @@ -37042,9 +39199,10 @@ func(ts *Tensor) SpecialPolygamma(n int64, del bool)(retVal *Tensor, err error) lib.AtgSpecialPolygamma(ptr, n, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialPolygamma() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialPolygamma") return retVal, err } @@ -37058,9 +39216,10 @@ func(ts *Tensor) SpecialPolygammaOut(out *Tensor, n int64, del bool)(retVal *Ten lib.AtgSpecialPolygammaOut(ptr, out.ctensor, n, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialPolygammaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialPolygammaOut") return retVal, err } @@ -37074,9 +39233,10 @@ func(ts *Tensor) SpecialPsi(del bool)(retVal *Tensor, err error) { 
lib.AtgSpecialPsi(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialPsi() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialPsi") return retVal, err } @@ -37090,9 +39250,10 @@ func(ts *Tensor) SpecialPsiOut(out *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSpecialPsiOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialPsiOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialPsiOut") return retVal, err } @@ -37106,9 +39267,10 @@ func(ts *Tensor) SpecialRound(decimals int64, del bool)(retVal *Tensor, err erro lib.AtgSpecialRound(ptr, ts.ctensor, decimals) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialRound() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialRound") return retVal, err } @@ -37122,9 +39284,10 @@ func(ts *Tensor) SpecialRoundOut(out *Tensor, decimals int64, del bool)(retVal * lib.AtgSpecialRoundOut(ptr, out.ctensor, ts.ctensor, decimals) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialRoundOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialRoundOut") return retVal, err } @@ -37137,9 +39300,10 @@ func SpecialScaledModifiedBesselK0(x *Tensor)(retVal *Tensor, err error) { lib.AtgSpecialScaledModifiedBesselK0(ptr, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialScaledModifiedBesselK0() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialScaledModifiedBesselK0") return retVal, err } @@ -37152,9 +39316,10 @@ func SpecialScaledModifiedBesselK0Out(out *Tensor, x *Tensor)(retVal *Tensor, er lib.AtgSpecialScaledModifiedBesselK0Out(ptr, out.ctensor, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialScaledModifiedBesselK0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialScaledModifiedBesselK0Out") return retVal, err } @@ -37167,9 +39332,10 @@ func SpecialScaledModifiedBesselK1(x *Tensor)(retVal *Tensor, err error) { lib.AtgSpecialScaledModifiedBesselK1(ptr, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialScaledModifiedBesselK1() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialScaledModifiedBesselK1") return retVal, err } @@ -37182,9 +39348,10 @@ func SpecialScaledModifiedBesselK1Out(out *Tensor, x *Tensor)(retVal *Tensor, er lib.AtgSpecialScaledModifiedBesselK1Out(ptr, out.ctensor, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialScaledModifiedBesselK1Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialScaledModifiedBesselK1Out") return retVal, err } @@ -37197,9 +39364,10 @@ func SpecialShiftedChebyshevPolynomialT(x *Tensor, n *Tensor)(retVal *Tensor, er lib.AtgSpecialShiftedChebyshevPolynomialT(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialT() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialT") return retVal, err } @@ -37212,9 +39380,10 @@ func SpecialShiftedChebyshevPolynomialTNScalar(x *Tensor, n *Scalar)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialTNScalar(ptr, 
x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialTNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialTNScalar") return retVal, err } @@ -37227,9 +39396,10 @@ func SpecialShiftedChebyshevPolynomialTNScalarOut(out *Tensor, x *Tensor, n *Sca lib.AtgSpecialShiftedChebyshevPolynomialTNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialTNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialTNScalarOut") return retVal, err } @@ -37242,9 +39412,10 @@ func SpecialShiftedChebyshevPolynomialTOut(out *Tensor, x *Tensor, n *Tensor)(re lib.AtgSpecialShiftedChebyshevPolynomialTOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialTOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialTOut") return retVal, err } @@ -37257,9 +39428,10 @@ func SpecialShiftedChebyshevPolynomialTXScalar(x *Scalar, n *Tensor)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialTXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialTXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialTXScalar") return retVal, err } @@ -37272,9 +39444,10 @@ func SpecialShiftedChebyshevPolynomialTXScalarOut(out *Tensor, x *Scalar, n *Ten lib.AtgSpecialShiftedChebyshevPolynomialTXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialTXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialTXScalarOut") return retVal, err } @@ -37287,9 +39460,10 @@ func SpecialShiftedChebyshevPolynomialU(x *Tensor, n *Tensor)(retVal *Tensor, er lib.AtgSpecialShiftedChebyshevPolynomialU(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialU() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialU") return retVal, err } @@ -37302,9 +39476,10 @@ func SpecialShiftedChebyshevPolynomialUNScalar(x *Tensor, n *Scalar)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialUNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialUNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialUNScalar") return retVal, err } @@ -37317,9 +39492,10 @@ func SpecialShiftedChebyshevPolynomialUNScalarOut(out *Tensor, x *Tensor, n *Sca lib.AtgSpecialShiftedChebyshevPolynomialUNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialUNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialUNScalarOut") return retVal, err } @@ -37332,9 +39508,10 @@ func SpecialShiftedChebyshevPolynomialUOut(out *Tensor, x *Tensor, n 
*Tensor)(re lib.AtgSpecialShiftedChebyshevPolynomialUOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialUOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialUOut") return retVal, err } @@ -37347,9 +39524,10 @@ func SpecialShiftedChebyshevPolynomialUXScalar(x *Scalar, n *Tensor)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialUXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialUXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialUXScalar") return retVal, err } @@ -37362,9 +39540,10 @@ func SpecialShiftedChebyshevPolynomialUXScalarOut(out *Tensor, x *Scalar, n *Ten lib.AtgSpecialShiftedChebyshevPolynomialUXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialUXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialUXScalarOut") return retVal, err } @@ -37377,9 +39556,10 @@ func SpecialShiftedChebyshevPolynomialV(x *Tensor, n *Tensor)(retVal *Tensor, er lib.AtgSpecialShiftedChebyshevPolynomialV(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialV() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialV") return retVal, err } @@ -37392,9 +39572,10 @@ func SpecialShiftedChebyshevPolynomialVNScalar(x *Tensor, n *Scalar)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialVNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialVNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialVNScalar") return retVal, err } @@ -37407,9 +39588,10 @@ func SpecialShiftedChebyshevPolynomialVNScalarOut(out *Tensor, x *Tensor, n *Sca lib.AtgSpecialShiftedChebyshevPolynomialVNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialVNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialVNScalarOut") return retVal, err } @@ -37422,9 +39604,10 @@ func SpecialShiftedChebyshevPolynomialVOut(out *Tensor, x *Tensor, n *Tensor)(re lib.AtgSpecialShiftedChebyshevPolynomialVOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialVOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialVOut") return retVal, err } @@ -37437,9 +39620,10 @@ func SpecialShiftedChebyshevPolynomialVXScalar(x *Scalar, n *Tensor)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialVXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialVXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialVXScalar") return retVal, err } @@ -37452,9 +39636,10 @@ func 
SpecialShiftedChebyshevPolynomialVXScalarOut(out *Tensor, x *Scalar, n *Ten lib.AtgSpecialShiftedChebyshevPolynomialVXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialVXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialVXScalarOut") return retVal, err } @@ -37467,9 +39652,10 @@ func SpecialShiftedChebyshevPolynomialW(x *Tensor, n *Tensor)(retVal *Tensor, er lib.AtgSpecialShiftedChebyshevPolynomialW(ptr, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialW() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialW") return retVal, err } @@ -37482,9 +39668,10 @@ func SpecialShiftedChebyshevPolynomialWNScalar(x *Tensor, n *Scalar)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialWNScalar(ptr, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialWNScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialWNScalar") return retVal, err } @@ -37497,9 +39684,10 @@ func SpecialShiftedChebyshevPolynomialWNScalarOut(out *Tensor, x *Tensor, n *Sca lib.AtgSpecialShiftedChebyshevPolynomialWNScalarOut(ptr, out.ctensor, x.ctensor, n.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialWNScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialWNScalarOut") return retVal, err } @@ -37512,9 +39700,10 @@ func SpecialShiftedChebyshevPolynomialWOut(out *Tensor, x *Tensor, n *Tensor)(re lib.AtgSpecialShiftedChebyshevPolynomialWOut(ptr, out.ctensor, x.ctensor, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialWOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialWOut") return retVal, err } @@ -37527,9 +39716,10 @@ func SpecialShiftedChebyshevPolynomialWXScalar(x *Scalar, n *Tensor)(retVal *Ten lib.AtgSpecialShiftedChebyshevPolynomialWXScalar(ptr, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialWXScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialWXScalar") return retVal, err } @@ -37542,9 +39732,10 @@ func SpecialShiftedChebyshevPolynomialWXScalarOut(out *Tensor, x *Scalar, n *Ten lib.AtgSpecialShiftedChebyshevPolynomialWXScalarOut(ptr, out.ctensor, x.cscalar, n.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialShiftedChebyshevPolynomialWXScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialShiftedChebyshevPolynomialWXScalarOut") return retVal, err } @@ -37558,9 +39749,10 @@ func(ts *Tensor) SpecialSinc(del bool)(retVal *Tensor, err error) { lib.AtgSpecialSinc(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialSinc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialSinc") return retVal, err } @@ -37574,9 +39766,10 @@ func(ts *Tensor) SpecialSincOut(out *Tensor, del 
bool)(retVal *Tensor, err error lib.AtgSpecialSincOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialSincOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialSincOut") return retVal, err } @@ -37590,9 +39783,10 @@ func(ts *Tensor) SpecialSoftmax(dim int64, dtype gotch.DType, del bool)(retVal * lib.AtgSpecialSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialSoftmax() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialSoftmax") return retVal, err } @@ -37605,9 +39799,10 @@ func SpecialSphericalBesselJ0(x *Tensor)(retVal *Tensor, err error) { lib.AtgSpecialSphericalBesselJ0(ptr, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialSphericalBesselJ0() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialSphericalBesselJ0") return retVal, err } @@ -37620,9 +39815,10 @@ func SpecialSphericalBesselJ0Out(out *Tensor, x *Tensor)(retVal *Tensor, err err lib.AtgSpecialSphericalBesselJ0Out(ptr, out.ctensor, x.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialSphericalBesselJ0Out() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialSphericalBesselJ0Out") return retVal, err } @@ -37636,9 +39832,10 @@ func(ts *Tensor) SpecialXlog1py(other *Tensor, del bool)(retVal *Tensor, err err lib.AtgSpecialXlog1py(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlog1py() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlog1py") return retVal, err } @@ -37652,9 +39849,10 @@ func(ts *Tensor) SpecialXlog1pyOtherScalar(other *Scalar, del bool)(retVal *Tens lib.AtgSpecialXlog1pyOtherScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlog1pyOtherScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlog1pyOtherScalar") return retVal, err } @@ -37668,9 +39866,10 @@ func(ts *Tensor) SpecialXlog1pyOtherScalarOut(out *Tensor, other *Scalar, del bo lib.AtgSpecialXlog1pyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlog1pyOtherScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlog1pyOtherScalarOut") return retVal, err } @@ -37684,9 +39883,10 @@ func(ts *Tensor) SpecialXlog1pyOut(out *Tensor, other *Tensor, del bool)(retVal lib.AtgSpecialXlog1pyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlog1pyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlog1pyOut") return retVal, err } @@ -37699,9 +39899,10 @@ func SpecialXlog1pySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, lib.AtgSpecialXlog1pySelfScalar(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlog1pySelfScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlog1pySelfScalar") return retVal, err } @@ -37714,9 +39915,10 @@ func SpecialXlog1pySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor) 
lib.AtgSpecialXlog1pySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlog1pySelfScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlog1pySelfScalarOut") return retVal, err } @@ -37730,9 +39932,10 @@ func(ts *Tensor) SpecialXlogy(other *Tensor, del bool)(retVal *Tensor, err error lib.AtgSpecialXlogy(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlogy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlogy") return retVal, err } @@ -37746,9 +39949,10 @@ func(ts *Tensor) SpecialXlogyOtherScalar(other *Scalar, del bool)(retVal *Tensor lib.AtgSpecialXlogyOtherScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlogyOtherScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlogyOtherScalar") return retVal, err } @@ -37762,9 +39966,10 @@ func(ts *Tensor) SpecialXlogyOtherScalarOut(out *Tensor, other *Scalar, del bool lib.AtgSpecialXlogyOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlogyOtherScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlogyOtherScalarOut") return retVal, err } @@ -37778,9 +39983,10 @@ func(ts *Tensor) SpecialXlogyOut(out *Tensor, other *Tensor, del bool)(retVal *T lib.AtgSpecialXlogyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlogyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlogyOut") return retVal, err } @@ -37793,9 +39999,10 @@ func SpecialXlogySelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, e lib.AtgSpecialXlogySelfScalar(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlogySelfScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlogySelfScalar") return retVal, err } @@ -37808,9 +40015,10 @@ func SpecialXlogySelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(r lib.AtgSpecialXlogySelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialXlogySelfScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialXlogySelfScalarOut") return retVal, err } @@ -37824,9 +40032,10 @@ func(ts *Tensor) SpecialZeta(other *Tensor, del bool)(retVal *Tensor, err error) lib.AtgSpecialZeta(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialZeta() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialZeta") return retVal, err } @@ -37840,9 +40049,10 @@ func(ts *Tensor) SpecialZetaOtherScalar(other *Scalar, del bool)(retVal *Tensor, lib.AtgSpecialZetaOtherScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialZetaOtherScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialZetaOtherScalar") return retVal, err } @@ -37856,9 +40066,10 @@ func(ts *Tensor) SpecialZetaOtherScalarOut(out *Tensor, 
other *Scalar, del bool) lib.AtgSpecialZetaOtherScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialZetaOtherScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialZetaOtherScalarOut") return retVal, err } @@ -37872,9 +40083,10 @@ func(ts *Tensor) SpecialZetaOut(out *Tensor, other *Tensor, del bool)(retVal *Te lib.AtgSpecialZetaOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialZetaOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialZetaOut") return retVal, err } @@ -37887,9 +40099,10 @@ func SpecialZetaSelfScalar(selfScalar *Scalar, other *Tensor)(retVal *Tensor, er lib.AtgSpecialZetaSelfScalar(ptr, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialZetaSelfScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialZetaSelfScalar") return retVal, err } @@ -37902,9 +40115,10 @@ func SpecialZetaSelfScalarOut(out *Tensor, selfScalar *Scalar, other *Tensor)(re lib.AtgSpecialZetaSelfScalarOut(ptr, out.ctensor, selfScalar.cscalar, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SpecialZetaSelfScalarOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SpecialZetaSelfScalarOut") return retVal, err } @@ -37918,9 +40132,10 @@ func(ts *Tensor) Sqrt(del bool)(retVal *Tensor, err error) { lib.AtgSqrt(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sqrt() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Sqrt") return retVal, err } @@ -37933,6 +40148,7 @@ func(ts *Tensor) Sqrt_()(err error) { lib.AtgSqrt_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Sqrt_() failed: %w", err) return err } ts.ctensor = *ptr @@ -37949,9 +40165,10 @@ func(ts *Tensor) SqrtOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqrtOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqrtOut") return retVal, err } @@ -37965,9 +40182,10 @@ func(ts *Tensor) Square(del bool)(retVal *Tensor, err error) { lib.AtgSquare(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Square() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Square") return retVal, err } @@ -37980,6 +40198,7 @@ func(ts *Tensor) Square_()(err error) { lib.AtgSquare_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Square_() failed: %w", err) return err } ts.ctensor = *ptr @@ -37996,9 +40215,10 @@ func(ts *Tensor) SquareOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgSquareOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SquareOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SquareOut") return retVal, err } @@ -38012,9 +40232,10 @@ func(ts *Tensor) Squeeze(del bool)(retVal *Tensor, err error) { lib.AtgSqueeze(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Squeeze() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, 
"Squeeze") return retVal, err } @@ -38027,6 +40248,7 @@ func(ts *Tensor) Squeeze_()(err error) { lib.AtgSqueeze_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Squeeze_() failed: %w", err) return err } ts.ctensor = *ptr @@ -38043,9 +40265,10 @@ func(ts *Tensor) SqueezeCopy(del bool)(retVal *Tensor, err error) { lib.AtgSqueezeCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeCopy") return retVal, err } @@ -38059,9 +40282,10 @@ func(ts *Tensor) SqueezeCopyDim(dim int64, del bool)(retVal *Tensor, err error) lib.AtgSqueezeCopyDim(ptr, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeCopyDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeCopyDim") return retVal, err } @@ -38075,9 +40299,10 @@ func(ts *Tensor) SqueezeCopyDimOut(out *Tensor, dim int64, del bool)(retVal *Ten lib.AtgSqueezeCopyDimOut(ptr, out.ctensor, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeCopyDimOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeCopyDimOut") return retVal, err } @@ -38092,9 +40317,10 @@ func(ts *Tensor) SqueezeCopyDims(dim []int64, del bool)(retVal *Tensor, err erro dimLen := len(dim) lib.AtgSqueezeCopyDims(ptr, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeCopyDims() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeCopyDims") return retVal, err } @@ -38109,9 +40335,10 @@ func(ts *Tensor) SqueezeCopyDimsOut(out *Tensor, dim []int64, del bool)(retVal * dimLen := len(dim) lib.AtgSqueezeCopyDimsOut(ptr, out.ctensor, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeCopyDimsOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeCopyDimsOut") return retVal, err } @@ -38125,9 +40352,10 @@ func(ts *Tensor) SqueezeCopyOut(out *Tensor, del bool)(retVal *Tensor, err error lib.AtgSqueezeCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeCopyOut") return retVal, err } @@ -38141,9 +40369,10 @@ func(ts *Tensor) SqueezeDim(dim int64, del bool)(retVal *Tensor, err error) { lib.AtgSqueezeDim(ptr, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeDim") return retVal, err } @@ -38156,6 +40385,7 @@ func(ts *Tensor) SqueezeDim_(dim int64)(err error) { lib.AtgSqueezeDim_(ptr, ts.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeDim_() failed: %w", err) return err } ts.ctensor = *ptr @@ -38173,9 +40403,10 @@ func(ts *Tensor) SqueezeDims(dim []int64, del bool)(retVal *Tensor, err error) { dimLen := len(dim) lib.AtgSqueezeDims(ptr, ts.ctensor, dim, dimLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("SqueezeDims() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "SqueezeDims") return retVal, err } @@ -38189,6 +40420,7 @@ func(ts *Tensor) SqueezeDims_(dim []int64)(err error) { dimLen := len(dim) 
@@ -38189,6 +40420,7 @@ func(ts *Tensor) SqueezeDims_(dim []int64)(err error) {
 	dimLen := len(dim)
 	lib.AtgSqueezeDims_(ptr, ts.ctensor, dim, dimLen)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SqueezeDims_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -38205,9 +40437,10 @@ func(ts *Tensor) Sspaddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor,
 	lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Sspaddmm() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Sspaddmm")
 
 	return retVal, err
 }
@@ -38221,9 +40454,10 @@ func(ts *Tensor) SspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(
 	lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SspaddmmOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SspaddmmOut")
 
 	return retVal, err
 }
@@ -38238,9 +40472,10 @@ func Stack(tensors []*Tensor, dim int64)(retVal *Tensor, err error) {
 	for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
 	lib.AtgStack(ptr, ctensors, len(ctensors), dim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Stack() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Stack")
 
 	return retVal, err
 }
@@ -38255,9 +40490,10 @@ func StackOut(out *Tensor, tensors []*Tensor, dim int64)(retVal *Tensor, err err
 	for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
 	lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StackOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "StackOut")
 
 	return retVal, err
 }
@@ -38273,9 +40509,10 @@ func(ts *Tensor) Std(unbiased bool, del bool)(retVal *Tensor, err error) {
 	if unbiased { cunbiased = int32(1) }
 	lib.AtgStd(ptr, ts.ctensor, cunbiased)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Std() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Std")
 
 	return retVal, err
 }
@@ -38298,9 +40535,10 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgStdCorrection(ptr, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdCorrection() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "StdCorrection")
 
 	return retVal, err
 }
@@ -38323,9 +40561,10 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgStdCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdCorrectionOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "StdCorrectionOut")
 
 	return retVal, err
 }
@@ -38344,9 +40583,10 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgStdDim(ptr, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdDim() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "StdDim")
 
 	return retVal, err
 }
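[Editor's note: multi-output ops such as StdMean (next hunk) now tag each returned tensor with an indexed name ("StdMean_0", "StdMean_1"), which helps attribute leaks to a specific output. Caller-side usage is unchanged; an illustrative sketch, assuming a valid *Tensor `x` and the generated signature StdMean(unbiased, del bool):]

    // std and mean are independent tensors; drop each explicitly (or rely
    // on the finalizer that newTensor registers) when no longer needed.
    std, mean, err := x.StdMean(true, false) // unbiased=true, del=false
    if err != nil {
    	log.Fatal(err)
    }
    defer std.MustDrop()
    defer mean.MustDrop()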
@@ -38362,10 +40602,11 @@ func(ts *Tensor) StdMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tens
 	if unbiased { cunbiased = int32(1) }
 	lib.AtgStdMean(ctensorPtr0, ts.ctensor, cunbiased)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdMean() failed: %w", err)
 		return retVal0, retVal1, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
+	retVal0 = newTensor(*ctensorPtr0, "StdMean_0")
+	retVal1 = newTensor(*ctensorPtr1, "StdMean_1")
 
 	return retVal0, retVal1, err
 }
@@ -38388,10 +40629,11 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgStdMeanCorrection(ctensorPtr0, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdMeanCorrection() failed: %w", err)
 		return retVal0, retVal1, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
+	retVal0 = newTensor(*ctensorPtr0, "StdMeanCorrection_0")
+	retVal1 = newTensor(*ctensorPtr1, "StdMeanCorrection_1")
 
 	return retVal0, retVal1, err
 }
@@ -38414,10 +40656,11 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgStdMeanCorrectionOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdMeanCorrectionOut() failed: %w", err)
 		return retVal0, retVal1, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
+	retVal0 = newTensor(*ctensorPtr0, "StdMeanCorrectionOut_0")
+	retVal1 = newTensor(*ctensorPtr1, "StdMeanCorrectionOut_1")
 
 	return retVal0, retVal1, err
 }
@@ -38436,10 +40679,11 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgStdMeanDim(ctensorPtr0, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdMeanDim() failed: %w", err)
 		return retVal0, retVal1, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
+	retVal0 = newTensor(*ctensorPtr0, "StdMeanDim_0")
+	retVal1 = newTensor(*ctensorPtr1, "StdMeanDim_1")
 
 	return retVal0, retVal1, err
 }
@@ -38458,9 +40702,10 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StdOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "StdOut")
 
 	return retVal, err
 }
@@ -38492,9 +40737,10 @@ creturnComplex := int32(0)
 	if returnComplex { creturnComplex = int32(1) }
 	lib.AtgStft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, cnormalized, conesided, creturnComplex)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Stft() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Stft")
 
 	return retVal, err
 }
@@ -38528,9 +40774,10 @@ creturnComplex := int32(0)
 	if returnComplex { creturnComplex = int32(1) }
 	lib.AtgStftCenter(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, padMode, cnormalized, conesided, creturnComplex)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("StftCenter() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "StftCenter")
 
 	return retVal, err
 }
@@ -38544,9 +40791,10 @@ func(ts *Tensor) Sub(other *Tensor, del bool)(retVal *Tensor, err error) {
 	lib.AtgSub(ptr, ts.ctensor, other.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Sub() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Sub")
 
 	return retVal, err
 }
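[Editor's note: trailing-underscore methods such as Sub_ (next hunk) mutate the receiver in place and return only an error, so they never go through newTensor; only the allocating variants do. Illustrative, assuming valid *Tensor values `a` and `b`:]

    diff, err := a.Sub(b, false) // allocates; tracked as "Sub" by newTensor
    if err != nil {
    	log.Fatal(err)
    }
    defer diff.MustDrop()

    // In-place: a.ctensor is swapped under the hood, no new tensor to track.
    if err := a.Sub_(b); err != nil {
    	log.Fatal(err)
    }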
@@ -38559,6 +40807,7 @@ func(ts *Tensor) Sub_(other *Tensor)(err error) {
 	lib.AtgSub_(ptr, ts.ctensor, other.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Sub_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -38575,9 +40824,10 @@ func(ts *Tensor) SubOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, er
 	lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SubOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SubOut")
 
 	return retVal, err
 }
@@ -38591,9 +40841,10 @@ func(ts *Tensor) SubScalar(other *Scalar, del bool)(retVal *Tensor, err error) {
 	lib.AtgSubScalar(ptr, ts.ctensor, other.cscalar)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SubScalar() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SubScalar")
 
 	return retVal, err
 }
@@ -38606,6 +40857,7 @@ func(ts *Tensor) SubScalar_(other *Scalar)(err error) {
 	lib.AtgSubScalar_(ptr, ts.ctensor, other.cscalar)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SubScalar_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -38622,9 +40874,10 @@ func(ts *Tensor) SubScalarOut(out *Tensor, other *Scalar, del bool)(retVal *Tens
 	lib.AtgSubScalarOut(ptr, out.ctensor, ts.ctensor, other.cscalar)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SubScalarOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SubScalarOut")
 
 	return retVal, err
 }
@@ -38638,9 +40891,10 @@ func(ts *Tensor) Subtract(other *Tensor, del bool)(retVal *Tensor, err error) {
 	lib.AtgSubtract(ptr, ts.ctensor, other.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Subtract() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Subtract")
 
 	return retVal, err
 }
@@ -38653,6 +40907,7 @@ func(ts *Tensor) Subtract_(other *Tensor)(err error) {
 	lib.AtgSubtract_(ptr, ts.ctensor, other.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Subtract_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -38669,9 +40924,10 @@ func(ts *Tensor) SubtractOut(out *Tensor, other *Tensor, del bool)(retVal *Tenso
 	lib.AtgSubtractOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SubtractOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SubtractOut")
 
 	return retVal, err
 }
@@ -38685,9 +40941,10 @@ func(ts *Tensor) SubtractScalar(other *Scalar, del bool)(retVal *Tensor, err err
 	lib.AtgSubtractScalar(ptr, ts.ctensor, other.cscalar)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SubtractScalar() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SubtractScalar")
 
 	return retVal, err
 }
@@ -38700,6 +40957,7 @@ func(ts *Tensor) SubtractScalar_(other *Scalar)(err error) {
 	lib.AtgSubtractScalar_(ptr, ts.ctensor, other.cscalar)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SubtractScalar_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -38716,9 +40974,10 @@ func(ts *Tensor) Sum(dtype gotch.DType, del bool)(retVal *Tensor, err error) {
 	lib.AtgSum(ptr, ts.ctensor, dtype.CInt())
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Sum() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Sum")
 
 	return retVal, err
 }
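[Editor's note: the `del` flag on allocating methods such as Sum asks the wrapper to drop the receiver after the call, which keeps chained expressions from leaking intermediates. A sketch, assuming a valid *Tensor `x` and the generated Must* panic-on-error wrappers:]

    // Square x, then sum; del=true on each step drops that step's receiver
    // immediately instead of waiting for the finalizer.
    total := x.MustSquare(false).MustSum(gotch.Float, true)
    defer total.MustDrop()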
@@ -38735,9 +40994,10 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgSumDimIntlist(ptr, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SumDimIntlist() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SumDimIntlist")
 
 	return retVal, err
 }
@@ -38754,9 +41014,10 @@ ckeepdim := int32(0)
 	if keepdim { ckeepdim = int32(1) }
 	lib.AtgSumIntlistOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ckeepdim, dtype.CInt())
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SumIntlistOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SumIntlistOut")
 
 	return retVal, err
 }
@@ -38770,9 +41031,10 @@ func(ts *Tensor) SumOut(out *Tensor, dtype gotch.DType, del bool)(retVal *Tensor
 	lib.AtgSumOut(ptr, out.ctensor, ts.ctensor, dtype.CInt())
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SumOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SumOut")
 
 	return retVal, err
 }
@@ -38787,9 +41049,10 @@ func(ts *Tensor) SumToSize(size []int64, del bool)(retVal *Tensor, err error) {
 	sizeLen := len(size)
 	lib.AtgSumToSize(ptr, ts.ctensor, size, sizeLen)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SumToSize() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "SumToSize")
 
 	return retVal, err
 }
@@ -38808,11 +41071,12 @@ ccomputeUv := int32(0)
 	if computeUv { ccomputeUv = int32(1) }
 	lib.AtgSvd(ctensorPtr0, ts.ctensor, csome, ccomputeUv)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Svd() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "Svd_0")
+	retVal1 = newTensor(*ctensorPtr1, "Svd_1")
+	retVal2 = newTensor(*ctensorPtr2, "Svd_2")
 
 	return retVal0, retVal1, retVal2, err
 }
@@ -38831,11 +41095,12 @@ ccomputeUv := int32(0)
 	if computeUv { ccomputeUv = int32(1) }
 	lib.AtgSvdU(ctensorPtr0, u.ctensor, s.ctensor, v.ctensor, ts.ctensor, csome, ccomputeUv)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("SvdU() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "SvdU_0")
+	retVal1 = newTensor(*ctensorPtr1, "SvdU_1")
+	retVal2 = newTensor(*ctensorPtr2, "SvdU_2")
 
 	return retVal0, retVal1, retVal2, err
 }
@@ -38849,9 +41114,10 @@ func(ts *Tensor) Swapaxes(axis0 int64, axis1 int64, del bool)(retVal *Tensor, er
 	lib.AtgSwapaxes(ptr, ts.ctensor, axis0, axis1)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Swapaxes() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Swapaxes")
 
 	return retVal, err
 }
@@ -38864,6 +41130,7 @@ func(ts *Tensor) Swapaxes_(axis0 int64, axis1 int64)(err error) {
 	lib.AtgSwapaxes_(ptr, ts.ctensor, axis0, axis1)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Swapaxes_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -38880,9 +41147,10 @@ func(ts *Tensor) Swapdims(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err
 	lib.AtgSwapdims(ptr, ts.ctensor, dim0, dim1)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Swapdims() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Swapdims")
 
 	return retVal, err
 }
@@ -38895,6 +41163,7 @@ func(ts *Tensor) Swapdims_(dim0 int64, dim1 int64)(err error) {
 	lib.AtgSwapdims_(ptr,
ts.ctensor, dim0, dim1) if err = TorchErr(); err != nil { + err = fmt.Errorf("Swapdims_() failed: %w", err) return err } ts.ctensor = *ptr @@ -38911,9 +41180,10 @@ func(ts *Tensor) T(del bool)(retVal *Tensor, err error) { lib.AtgT(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("T() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "T") return retVal, err } @@ -38926,6 +41196,7 @@ func(ts *Tensor) T_()(err error) { lib.AtgT_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("T_() failed: %w", err) return err } ts.ctensor = *ptr @@ -38942,9 +41213,10 @@ func(ts *Tensor) TCopy(del bool)(retVal *Tensor, err error) { lib.AtgTCopy(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TCopy() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TCopy") return retVal, err } @@ -38958,9 +41230,10 @@ func(ts *Tensor) TCopyOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgTCopyOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TCopyOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TCopyOut") return retVal, err } @@ -38974,9 +41247,10 @@ func(ts *Tensor) Take(index *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgTake(ptr, ts.ctensor, index.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Take() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Take") return retVal, err } @@ -38996,9 +41270,10 @@ func(ts *Tensor) TakeAlongDim(indices *Tensor, dim []int64, del bool)(retVal *Te } lib.AtgTakeAlongDim(ptr, ts.ctensor, indices.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("TakeAlongDim() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TakeAlongDim") return retVal, err } @@ -39018,9 +41293,10 @@ func(ts *Tensor) TakeAlongDimOut(out *Tensor, indices *Tensor, dim []int64, del } lib.AtgTakeAlongDimOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("TakeAlongDimOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TakeAlongDimOut") return retVal, err } @@ -39034,9 +41310,10 @@ func(ts *Tensor) TakeOut(out *Tensor, index *Tensor, del bool)(retVal *Tensor, e lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TakeOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TakeOut") return retVal, err } @@ -39050,9 +41327,10 @@ func(ts *Tensor) Tan(del bool)(retVal *Tensor, err error) { lib.AtgTan(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tan() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Tan") return retVal, err } @@ -39065,6 +41343,7 @@ func(ts *Tensor) Tan_()(err error) { lib.AtgTan_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tan_() failed: %w", err) return err } ts.ctensor = *ptr @@ -39081,9 +41360,10 @@ func(ts *Tensor) TanOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgTanOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TanOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: 
*ptr} + retVal = newTensor(*ptr, "TanOut") return retVal, err } @@ -39097,9 +41377,10 @@ func(ts *Tensor) Tanh(del bool)(retVal *Tensor, err error) { lib.AtgTanh(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tanh() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Tanh") return retVal, err } @@ -39112,6 +41393,7 @@ func(ts *Tensor) Tanh_()(err error) { lib.AtgTanh_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tanh_() failed: %w", err) return err } ts.ctensor = *ptr @@ -39127,9 +41409,10 @@ func TanhBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor, err error) lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TanhBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TanhBackward") return retVal, err } @@ -39142,9 +41425,10 @@ func TanhBackwardGradInput(gradInput *Tensor, gradOutput *Tensor, output *Tensor lib.AtgTanhBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TanhBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TanhBackwardGradInput") return retVal, err } @@ -39158,9 +41442,10 @@ func(ts *Tensor) TanhOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TanhOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TanhOut") return retVal, err } @@ -39176,9 +41461,10 @@ func(ts *Tensor) Tensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, d dimsOtherLen := len(dimsOther) lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, dimsSelfLen, dimsOther, dimsOtherLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tensordot() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Tensordot") return retVal, err } @@ -39194,9 +41480,10 @@ func(ts *Tensor) TensordotOut(out *Tensor, other *Tensor, dimsSelf []int64, dims dimsOtherLen := len(dimsOther) lib.AtgTensordotOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dimsSelf, dimsSelfLen, dimsOther, dimsOtherLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("TensordotOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TensordotOut") return retVal, err } @@ -39210,9 +41497,10 @@ func(ts *Tensor) Threshold(threshold *Scalar, value *Scalar, del bool)(retVal *T lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Threshold() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Threshold") return retVal, err } @@ -39225,6 +41513,7 @@ func(ts *Tensor) Threshold_(threshold *Scalar, value *Scalar)(err error) { lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("Threshold_() failed: %w", err) return err } ts.ctensor = *ptr @@ -39241,9 +41530,10 @@ func(ts *Tensor) ThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bo lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ThresholdBackward() failed: %w", err) return retVal, err } 
- retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ThresholdBackward") return retVal, err } @@ -39257,9 +41547,10 @@ func(ts *Tensor) ThresholdBackwardGradInput(gradInput *Tensor, gradOutput *Tenso lib.AtgThresholdBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, threshold.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ThresholdBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ThresholdBackwardGradInput") return retVal, err } @@ -39273,9 +41564,10 @@ func(ts *Tensor) ThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("ThresholdOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ThresholdOut") return retVal, err } @@ -39290,9 +41582,10 @@ func(ts *Tensor) Tile(dims []int64, del bool)(retVal *Tensor, err error) { dimsLen := len(dims) lib.AtgTile(ptr, ts.ctensor, dims, dimsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tile() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Tile") return retVal, err } @@ -39306,9 +41599,10 @@ func(ts *Tensor) To(device gotch.Device, del bool)(retVal *Tensor, err error) { lib.AtgTo(ptr, ts.ctensor, device.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("To() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "To") return retVal, err } @@ -39322,9 +41616,10 @@ func(ts *Tensor) ToDense(dtype gotch.DType, del bool)(retVal *Tensor, err error) lib.AtgToDense(ptr, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToDense() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToDense") return retVal, err } @@ -39337,9 +41632,10 @@ func ToDenseBackward(grad *Tensor, input *Tensor)(retVal *Tensor, err error) { lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToDenseBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToDenseBackward") return retVal, err } @@ -39357,9 +41653,10 @@ ccopy := int32(0) if copy { ccopy = int32(1) } lib.AtgToDevice(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToDevice() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToDevice") return retVal, err } @@ -39377,9 +41674,10 @@ ccopy := int32(0) if copy { ccopy = int32(1) } lib.AtgToDtype(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToDtype() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToDtype") return retVal, err } @@ -39397,9 +41695,10 @@ ccopy := int32(0) if copy { ccopy = int32(1) } lib.AtgToDtypeLayout(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToDtypeLayout() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToDtypeLayout") return retVal, err } @@ -39413,9 +41712,10 @@ func(ts *Tensor) ToMkldnn(dtype gotch.DType, del bool)(retVal *Tensor, err error lib.AtgToMkldnn(ptr, 
ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToMkldnn() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToMkldnn") return retVal, err } @@ -39428,9 +41728,10 @@ func ToMkldnnBackward(grad *Tensor, input *Tensor)(retVal *Tensor, err error) { lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToMkldnnBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToMkldnnBackward") return retVal, err } @@ -39444,9 +41745,10 @@ func(ts *Tensor) ToMkldnnOut(out *Tensor, dtype gotch.DType, del bool)(retVal *T lib.AtgToMkldnnOut(ptr, out.ctensor, ts.ctensor, dtype.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToMkldnnOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToMkldnnOut") return retVal, err } @@ -39464,9 +41766,10 @@ ccopy := int32(0) if copy { ccopy = int32(1) } lib.AtgToOther(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToOther() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToOther") return retVal, err } @@ -39481,9 +41784,10 @@ func(ts *Tensor) ToPaddedTensor(padding float64, outputSize []int64, del bool)(r outputSizeLen := len(outputSize) lib.AtgToPaddedTensor(ptr, ts.ctensor, padding, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToPaddedTensor() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToPaddedTensor") return retVal, err } @@ -39498,9 +41802,10 @@ func(ts *Tensor) ToPaddedTensorOut(out *Tensor, padding float64, outputSize []in outputSizeLen := len(outputSize) lib.AtgToPaddedTensorOut(ptr, out.ctensor, ts.ctensor, padding, outputSize, outputSizeLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToPaddedTensorOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToPaddedTensorOut") return retVal, err } @@ -39521,9 +41826,10 @@ var cdenseDimVal int64 = 0 } lib.AtgToSparse(ptr, ts.ctensor, int8(layout), blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToSparse() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToSparse") return retVal, err } @@ -39544,9 +41850,10 @@ var cdenseDimVal int64 = 0 } lib.AtgToSparseBsc(ptr, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToSparseBsc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToSparseBsc") return retVal, err } @@ -39567,9 +41874,10 @@ var cdenseDimVal int64 = 0 } lib.AtgToSparseBscOut(ptr, out.ctensor, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToSparseBscOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "ToSparseBscOut") return retVal, err } @@ -39590,9 +41898,10 @@ var cdenseDimVal int64 = 0 } lib.AtgToSparseBsr(ptr, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("ToSparseBsr() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = 
newTensor(*ptr, "ToSparseBsr")
 
 	return retVal, err
 }
@@ -39613,9 +41922,10 @@ var cdenseDimVal int64 = 0
 	}
 	lib.AtgToSparseBscOut(ptr, out.ctensor, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull) — [see L263 above]
@@ -39613,9 +41922,10 @@ var cdenseDimVal int64 = 0
 	}
 	lib.AtgToSparseBsrOut(ptr, out.ctensor, ts.ctensor, blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseBsrOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseBsrOut")
 
 	return retVal, err
 }
@@ -39635,9 +41945,10 @@ func(ts *Tensor) ToSparseCsc(denseDim []int64, del bool)(retVal *Tensor, err err
 	}
 	lib.AtgToSparseCsc(ptr, ts.ctensor, cdenseDimVal, cdenseDimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseCsc() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseCsc")
 
 	return retVal, err
 }
@@ -39657,9 +41968,10 @@ func(ts *Tensor) ToSparseCscOut(out *Tensor, denseDim []int64, del bool)(retVal
 	}
 	lib.AtgToSparseCscOut(ptr, out.ctensor, ts.ctensor, cdenseDimVal, cdenseDimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseCscOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseCscOut")
 
 	return retVal, err
 }
@@ -39679,9 +41991,10 @@ func(ts *Tensor) ToSparseCsr(denseDim []int64, del bool)(retVal *Tensor, err err
 	}
 	lib.AtgToSparseCsr(ptr, ts.ctensor, cdenseDimVal, cdenseDimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseCsr() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseCsr")
 
 	return retVal, err
 }
@@ -39701,9 +42014,10 @@ func(ts *Tensor) ToSparseCsrOut(out *Tensor, denseDim []int64, del bool)(retVal
 	}
 	lib.AtgToSparseCsrOut(ptr, out.ctensor, ts.ctensor, cdenseDimVal, cdenseDimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseCsrOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseCsrOut")
 
 	return retVal, err
 }
@@ -39724,9 +42038,10 @@ var cdenseDimVal int64 = 0
 	}
 	lib.AtgToSparseOut(ptr, out.ctensor, ts.ctensor, int8(layout), blocksize, blocksizeLen, cdenseDimVal, cdenseDimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseOut")
 
 	return retVal, err
 }
@@ -39740,9 +42055,10 @@ func(ts *Tensor) ToSparseSparseDim(sparseDim int64, del bool)(retVal *Tensor, er
 	lib.AtgToSparseSparseDim(ptr, ts.ctensor, sparseDim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseSparseDim() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseSparseDim")
 
 	return retVal, err
 }
@@ -39756,9 +42072,10 @@ func(ts *Tensor) ToSparseSparseDimOut(out *Tensor, sparseDim int64, del bool)(re
 	lib.AtgToSparseSparseDimOut(ptr, out.ctensor, ts.ctensor, sparseDim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("ToSparseSparseDimOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "ToSparseSparseDimOut")
 
 	return retVal, err
 }
@@ -39776,10 +42093,11 @@ csorted := int32(0)
 	if sorted { csorted = int32(1) }
 	lib.AtgTopk(ctensorPtr0, ts.ctensor, k, dim, clargest, csorted)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Topk() failed: %w", err)
 		return retVal0, retVal1, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
+	retVal0 = newTensor(*ctensorPtr0, "Topk_0")
+	retVal1 = newTensor(*ctensorPtr1, "Topk_1")
 
 	return retVal0, retVal1, err
 }
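[Editor's note: Topk is a representative two-output wrapper; after this patch a leaked pair shows up as "Topk_0"/"Topk_1" rather than anonymously. Illustrative call, assuming a valid *Tensor `x` and the generated signature Topk(k, dim int64, largest, sorted, del bool):]

    values, indices, err := x.Topk(3, -1, true, true, false) // top 3 on last dim
    if err != nil {
    	log.Fatal(err)
    }
    defer values.MustDrop()
    defer indices.MustDrop()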
@@ -39797,10 +42115,11 @@ csorted := int32(0)
 	if sorted { csorted = int32(1) }
 	lib.AtgTopkValues(ctensorPtr0, values.ctensor, indices.ctensor, ts.ctensor, k, dim, clargest, csorted)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("TopkValues() failed: %w", err)
 		return retVal0, retVal1, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
+	retVal0 = newTensor(*ctensorPtr0, "TopkValues_0")
+	retVal1 = newTensor(*ctensorPtr1, "TopkValues_1")
 
 	return retVal0, retVal1, err
 }
@@ -39814,9 +42133,10 @@ func(ts *Tensor) Totype(scalarType gotch.DType, del bool)(retVal *Tensor, err er
 	lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt())
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Totype() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Totype")
 
 	return retVal, err
 }
@@ -39830,9 +42150,10 @@ func(ts *Tensor) Trace(del bool)(retVal *Tensor, err error) {
 	lib.AtgTrace(ptr, ts.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Trace() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Trace")
 
 	return retVal, err
 }
@@ -39846,9 +42167,10 @@ func TraceBackward(grad *Tensor, sizes []int64)(retVal *Tensor, err error) {
 	sizesLen := len(sizes)
 	lib.AtgTraceBackward(ptr, grad.ctensor, sizes, sizesLen)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("TraceBackward() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "TraceBackward")
 
 	return retVal, err
 }
@@ -39862,9 +42184,10 @@ func(ts *Tensor) TraceOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 	lib.AtgTraceOut(ptr, out.ctensor, ts.ctensor)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("TraceOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "TraceOut")
 
 	return retVal, err
 }
@@ -39878,9 +42201,10 @@ func(ts *Tensor) Transpose(dim0 int64, dim1 int64, del bool)(retVal *Tensor, err
 	lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Transpose() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Transpose")
 
 	return retVal, err
 }
@@ -39893,6 +42217,7 @@ func(ts *Tensor) Transpose_(dim0 int64, dim1 int64)(err error) {
 	lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Transpose_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -39909,9 +42234,10 @@ func(ts *Tensor) TransposeCopy(dim0 int64, dim1 int64, del bool)(retVal *Tensor,
 	lib.AtgTransposeCopy(ptr, ts.ctensor, dim0, dim1)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("TransposeCopy() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "TransposeCopy")
 
 	return retVal, err
 }
@@ -39925,9 +42251,10 @@ func(ts *Tensor) TransposeCopyIntOut(out *Tensor, dim0 int64, dim1 int64, del bo
 	lib.AtgTransposeCopyIntOut(ptr, out.ctensor, ts.ctensor, dim0, dim1)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("TransposeCopyIntOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "TransposeCopyIntOut")
 
 	return retVal, err
 }
@@ -39940,9 +42267,10 @@ func Trapezoid(y *Tensor, dim int64)(retVal *Tensor, err error) {
 	lib.AtgTrapezoid(ptr, y.ctensor, dim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Trapezoid() failed: %w", err)
 		return retVal, err
 	}
-	retVal =
&Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Trapezoid") return retVal, err } @@ -39955,9 +42283,10 @@ func TrapezoidX(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) { lib.AtgTrapezoidX(ptr, y.ctensor, x.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrapezoidX() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrapezoidX") return retVal, err } @@ -39970,9 +42299,10 @@ func Trapz(y *Tensor, x *Tensor, dim int64)(retVal *Tensor, err error) { lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("Trapz() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Trapz") return retVal, err } @@ -39985,9 +42315,10 @@ func TrapzDx(y *Tensor, dx float64, dim int64)(retVal *Tensor, err error) { lib.AtgTrapzDx(ptr, y.ctensor, dx, dim) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrapzDx() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrapzDx") return retVal, err } @@ -40007,10 +42338,11 @@ cunitriangular := int32(0) if unitriangular { cunitriangular = int32(1) } lib.AtgTriangularSolve(ctensorPtr0, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular) if err = TorchErr(); err != nil { + err = fmt.Errorf("TriangularSolve() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "TriangularSolve_0") + retVal1 = newTensor(*ctensorPtr1, "TriangularSolve_1") return retVal0, retVal1, err } @@ -40030,10 +42362,11 @@ cunitriangular := int32(0) if unitriangular { cunitriangular = int32(1) } lib.AtgTriangularSolveX(ctensorPtr0, x.ctensor, m.ctensor, ts.ctensor, a.ctensor, cupper, ctranspose, cunitriangular) if err = TorchErr(); err != nil { + err = fmt.Errorf("TriangularSolveX() failed: %w", err) return retVal0, retVal1, err } - retVal0 = &Tensor{ctensor: *ctensorPtr0} - retVal1 = &Tensor{ctensor: *ctensorPtr1} + retVal0 = newTensor(*ctensorPtr0, "TriangularSolveX_0") + retVal1 = newTensor(*ctensorPtr1, "TriangularSolveX_1") return retVal0, retVal1, err } @@ -40047,9 +42380,10 @@ func(ts *Tensor) Tril(diagonal int64, del bool)(retVal *Tensor, err error) { lib.AtgTril(ptr, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tril() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Tril") return retVal, err } @@ -40062,6 +42396,7 @@ func(ts *Tensor) Tril_(diagonal int64)(err error) { lib.AtgTril_(ptr, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("Tril_() failed: %w", err) return err } ts.ctensor = *ptr @@ -40077,9 +42412,10 @@ func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, op lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrilIndices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrilIndices") return retVal, err } @@ -40092,9 +42428,10 @@ func TrilIndicesOut(out *Tensor, row int64, col int64, offset int64)(retVal *Ten lib.AtgTrilIndicesOut(ptr, out.ctensor, row, col, offset) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrilIndicesOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrilIndicesOut") return retVal, err 
} @@ -40108,9 +42445,10 @@ func(ts *Tensor) TrilOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrilOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrilOut") return retVal, err } @@ -40125,9 +42463,10 @@ func TripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margi if swap { cswap = int32(1) } lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction) if err = TorchErr(); err != nil { + err = fmt.Errorf("TripletMarginLoss() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TripletMarginLoss") return retVal, err } @@ -40141,9 +42480,10 @@ func(ts *Tensor) Triu(diagonal int64, del bool)(retVal *Tensor, err error) { lib.AtgTriu(ptr, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("Triu() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Triu") return retVal, err } @@ -40156,6 +42496,7 @@ func(ts *Tensor) Triu_(diagonal int64)(err error) { lib.AtgTriu_(ptr, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("Triu_() failed: %w", err) return err } ts.ctensor = *ptr @@ -40171,9 +42512,10 @@ func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, op lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { + err = fmt.Errorf("TriuIndices() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TriuIndices") return retVal, err } @@ -40186,9 +42528,10 @@ func TriuIndicesOut(out *Tensor, row int64, col int64, offset int64)(retVal *Ten lib.AtgTriuIndicesOut(ptr, out.ctensor, row, col, offset) if err = TorchErr(); err != nil { + err = fmt.Errorf("TriuIndicesOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TriuIndicesOut") return retVal, err } @@ -40202,9 +42545,10 @@ func(ts *Tensor) TriuOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor, lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal) if err = TorchErr(); err != nil { + err = fmt.Errorf("TriuOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TriuOut") return retVal, err } @@ -40218,9 +42562,10 @@ func(ts *Tensor) TrueDivide(other *Tensor, del bool)(retVal *Tensor, err error) lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrueDivide() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrueDivide") return retVal, err } @@ -40233,6 +42578,7 @@ func(ts *Tensor) TrueDivide_(other *Tensor)(err error) { lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrueDivide_() failed: %w", err) return err } ts.ctensor = *ptr @@ -40249,9 +42595,10 @@ func(ts *Tensor) TrueDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Ten lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrueDivideOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrueDivideOut") return retVal, err } @@ -40265,9 +42612,10 @@ func(ts *Tensor) 
TrueDivideScalar(other *Scalar, del bool)(retVal *Tensor, err e lib.AtgTrueDivideScalar(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrueDivideScalar() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TrueDivideScalar") return retVal, err } @@ -40280,6 +42628,7 @@ func(ts *Tensor) TrueDivideScalar_(other *Scalar)(err error) { lib.AtgTrueDivideScalar_(ptr, ts.ctensor, other.cscalar) if err = TorchErr(); err != nil { + err = fmt.Errorf("TrueDivideScalar_() failed: %w", err) return err } ts.ctensor = *ptr @@ -40296,9 +42645,10 @@ func(ts *Tensor) Trunc(del bool)(retVal *Tensor, err error) { lib.AtgTrunc(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Trunc() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Trunc") return retVal, err } @@ -40311,6 +42661,7 @@ func(ts *Tensor) Trunc_()(err error) { lib.AtgTrunc_(ptr, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("Trunc_() failed: %w", err) return err } ts.ctensor = *ptr @@ -40327,9 +42678,10 @@ func(ts *Tensor) TruncOut(out *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TruncOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TruncOut") return retVal, err } @@ -40343,9 +42695,10 @@ func(ts *Tensor) TypeAs(other *Tensor, del bool)(retVal *Tensor, err error) { lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { + err = fmt.Errorf("TypeAs() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "TypeAs") return retVal, err } @@ -40360,9 +42713,10 @@ func(ts *Tensor) Unflatten(dim int64, sizes []int64, del bool)(retVal *Tensor, e sizesLen := len(sizes) lib.AtgUnflatten(ptr, ts.ctensor, dim, sizes, sizesLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("Unflatten() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Unflatten") return retVal, err } @@ -40376,9 +42730,10 @@ func(ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool)(retVa lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("Unfold() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "Unfold") return retVal, err } @@ -40392,9 +42747,10 @@ func UnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, s inputSizesLen := len(inputSizes) lib.AtgUnfoldBackward(ptr, gradIn.ctensor, inputSizes, inputSizesLen, dim, size, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("UnfoldBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UnfoldBackward") return retVal, err } @@ -40408,9 +42764,10 @@ func UnfoldBackwardOut(out *Tensor, gradIn *Tensor, inputSizes []int64, dim int6 inputSizesLen := len(inputSizes) lib.AtgUnfoldBackwardOut(ptr, out.ctensor, gradIn.ctensor, inputSizes, inputSizesLen, dim, size, step) if err = TorchErr(); err != nil { + err = fmt.Errorf("UnfoldBackwardOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UnfoldBackwardOut") return retVal, err } @@ -40424,9 +42781,10 @@ func(ts *Tensor) UnfoldCopy(dimension int64, size int64, step int64, del bool)(r 
 	lib.AtgUnfoldCopy(ptr, ts.ctensor, dimension, size, step)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UnfoldCopy() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "UnfoldCopy")
 
 	return retVal, err
 }
@@ -40440,9 +42798,10 @@ func(ts *Tensor) UnfoldCopyOut(out *Tensor, dimension int64, size int64, step in
 	lib.AtgUnfoldCopyOut(ptr, out.ctensor, ts.ctensor, dimension, size, step)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UnfoldCopyOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "UnfoldCopyOut")
 
 	return retVal, err
 }
@@ -40456,9 +42815,10 @@ func(ts *Tensor) Uniform(from float64, to float64, del bool)(retVal *Tensor, err
 	lib.AtgUniform(ptr, ts.ctensor, from, to)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Uniform() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Uniform")
 
 	return retVal, err
 }
@@ -40471,6 +42831,7 @@ func(ts *Tensor) Uniform_(from float64, to float64)(err error) {
 	lib.AtgUniform_(ptr, ts.ctensor, from, to)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Uniform_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -40487,9 +42848,10 @@ func(ts *Tensor) UniformOut(out *Tensor, from float64, to float64, del bool)(ret
 	lib.AtgUniformOut(ptr, out.ctensor, ts.ctensor, from, to)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UniformOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "UniformOut")
 
 	return retVal, err
 }
@@ -40514,11 +42876,12 @@ var cdimVal int64 = 0
 	}
 	lib.AtgUniqueConsecutive(ctensorPtr0, ts.ctensor, creturnInverse, creturnCounts, cdimVal, cdimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UniqueConsecutive() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "UniqueConsecutive_0")
+	retVal1 = newTensor(*ctensorPtr1, "UniqueConsecutive_1")
+	retVal2 = newTensor(*ctensorPtr2, "UniqueConsecutive_2")
 
 	return retVal0, retVal1, retVal2, err
 }
@@ -40543,11 +42906,12 @@ var cdimVal int64 = 0
 	}
 	lib.AtgUniqueConsecutiveOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, creturnInverse, creturnCounts, cdimVal, cdimNull)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UniqueConsecutiveOut() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "UniqueConsecutiveOut_0")
+	retVal1 = newTensor(*ctensorPtr1, "UniqueConsecutiveOut_1")
+	retVal2 = newTensor(*ctensorPtr2, "UniqueConsecutiveOut_2")
 
 	return retVal0, retVal1, retVal2, err
 }
@@ -40568,11 +42932,12 @@ creturnCounts := int32(0)
 	if returnCounts { creturnCounts = int32(1) }
 	lib.AtgUniqueDim(ctensorPtr0, ts.ctensor, dim, csorted, creturnInverse, creturnCounts)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UniqueDim() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "UniqueDim_0")
+	retVal1 = newTensor(*ctensorPtr1, "UniqueDim_1")
+	retVal2 = newTensor(*ctensorPtr2, "UniqueDim_2")
 
 	return retVal0, retVal1, retVal2, err
 }
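[Editor's note: the Unique* family returns three tensors (values, inverse indices, counts), each now tagged with its own indexed name. Illustrative, assuming a valid *Tensor `x` and the generated signature UniqueDim(dim int64, sorted, returnInverse, returnCounts, del bool):]

    vals, inverse, counts, err := x.UniqueDim(0, true, true, true, false)
    if err != nil {
    	log.Fatal(err)
    }
    defer vals.MustDrop()
    defer inverse.MustDrop()
    defer counts.MustDrop()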
@@ -40591,11 +42956,12 @@ creturnCounts := int32(0)
 	if returnCounts { creturnCounts = int32(1) }
 	lib.AtgUniqueDimConsecutive(ctensorPtr0, ts.ctensor, dim, creturnInverse, creturnCounts)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UniqueDimConsecutive() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "UniqueDimConsecutive_0")
+	retVal1 = newTensor(*ctensorPtr1, "UniqueDimConsecutive_1")
+	retVal2 = newTensor(*ctensorPtr2, "UniqueDimConsecutive_2")
 
 	return retVal0, retVal1, retVal2, err
 }
@@ -40614,11 +42980,12 @@ creturnCounts := int32(0)
 	if returnCounts { creturnCounts = int32(1) }
 	lib.AtgUniqueDimConsecutiveOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, dim, creturnInverse, creturnCounts)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UniqueDimConsecutiveOut() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "UniqueDimConsecutiveOut_0")
+	retVal1 = newTensor(*ctensorPtr1, "UniqueDimConsecutiveOut_1")
+	retVal2 = newTensor(*ctensorPtr2, "UniqueDimConsecutiveOut_2")
 
 	return retVal0, retVal1, retVal2, err
 }
@@ -40639,11 +43006,12 @@ creturnCounts := int32(0)
 	if returnCounts { creturnCounts = int32(1) }
 	lib.AtgUniqueDimOut(ctensorPtr0, out0.ctensor, out1.ctensor, out2.ctensor, ts.ctensor, dim, csorted, creturnInverse, creturnCounts)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UniqueDimOut() failed: %w", err)
 		return retVal0, retVal1, retVal2, err
 	}
-	retVal0 = &Tensor{ctensor: *ctensorPtr0}
-	retVal1 = &Tensor{ctensor: *ctensorPtr1}
-	retVal2 = &Tensor{ctensor: *ctensorPtr2}
+	retVal0 = newTensor(*ctensorPtr0, "UniqueDimOut_0")
+	retVal1 = newTensor(*ctensorPtr1, "UniqueDimOut_1")
+	retVal2 = newTensor(*ctensorPtr2, "UniqueDimOut_2")
 
 	return retVal0, retVal1, retVal2, err
 }
@@ -40657,9 +43025,10 @@ func(ts *Tensor) Unsqueeze(dim int64, del bool)(retVal *Tensor, err error) {
 	lib.AtgUnsqueeze(ptr, ts.ctensor, dim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Unsqueeze() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "Unsqueeze")
 
 	return retVal, err
 }
@@ -40672,6 +43041,7 @@ func(ts *Tensor) Unsqueeze_(dim int64)(err error) {
 	lib.AtgUnsqueeze_(ptr, ts.ctensor, dim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("Unsqueeze_() failed: %w", err)
 		return err
 	}
 	ts.ctensor = *ptr
@@ -40688,9 +43058,10 @@ func(ts *Tensor) UnsqueezeCopy(dim int64, del bool)(retVal *Tensor, err error) {
 	lib.AtgUnsqueezeCopy(ptr, ts.ctensor, dim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UnsqueezeCopy() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "UnsqueezeCopy")
 
 	return retVal, err
 }
@@ -40704,9 +43075,10 @@ func(ts *Tensor) UnsqueezeCopyOut(out *Tensor, dim int64, del bool)(retVal *Tens
 	lib.AtgUnsqueezeCopyOut(ptr, out.ctensor, ts.ctensor, dim)
 	if err = TorchErr(); err != nil {
+		err = fmt.Errorf("UnsqueezeCopyOut() failed: %w", err)
 		return retVal, err
 	}
-	retVal = &Tensor{ctensor: *ptr}
+	retVal = newTensor(*ptr, "UnsqueezeCopyOut")
 
 	return retVal, err
 }
@@ -40735,9 +43107,10 @@ var cscalesWVal float64 = 0.0
 	}
 	lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners,
cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBicubic2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBicubic2d") return retVal, err } @@ -40766,9 +43139,10 @@ var cscalesWVal float64 = 0.0 } lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBicubic2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBicubic2dBackward") return retVal, err } @@ -40797,9 +43171,10 @@ var cscalesWVal float64 = 0.0 } lib.AtgUpsampleBicubic2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBicubic2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBicubic2dBackwardGradInput") return retVal, err } @@ -40828,9 +43203,10 @@ var cscalesWVal float64 = 0.0 } lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBicubic2dOut() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBicubic2dOut") return retVal, err } @@ -40847,9 +43223,10 @@ calignCorners := int32(0) scaleFactorsLen := len(scaleFactors) lib.AtgUpsampleBicubic2dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBicubic2dVec() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBicubic2dVec") return retVal, err } @@ -40878,9 +43255,10 @@ var cscalesWVal float64 = 0.0 } lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBilinear2d() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBilinear2d") return retVal, err } @@ -40909,9 +43287,10 @@ var cscalesWVal float64 = 0.0 } lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBilinear2dBackward() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBilinear2dBackward") return retVal, err } @@ -40940,9 +43319,10 @@ var cscalesWVal float64 = 0.0 } lib.AtgUpsampleBilinear2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { + err = fmt.Errorf("UpsampleBilinear2dBackwardGradInput() failed: %w", err) return retVal, err } - retVal = &Tensor{ctensor: *ptr} + retVal = newTensor(*ptr, "UpsampleBilinear2dBackwardGradInput") return retVal, err } @@ -40971,9 +43351,10 @@ var 
cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleBilinear2dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleBilinear2dOut")
 
 return retVal, err
 }
@@ -40990,9 +43371,10 @@ calignCorners := int32(0)
 scaleFactorsLen := len(scaleFactors)
 lib.AtgUpsampleBilinear2dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleBilinear2dVec() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleBilinear2dVec")
 
 return retVal, err
 }
@@ -41015,9 +43397,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleLinear1d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleLinear1d")
 
 return retVal, err
 }
@@ -41040,9 +43423,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleLinear1dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleLinear1dBackward")
 
 return retVal, err
 }
@@ -41065,9 +43449,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleLinear1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleLinear1dBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleLinear1dBackwardGradInput")
 
 return retVal, err
 }
@@ -41090,9 +43475,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleLinear1dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleLinear1dOut")
 
 return retVal, err
 }
@@ -41109,9 +43495,10 @@ calignCorners := int32(0)
 scaleFactorsLen := len(scaleFactors)
 lib.AtgUpsampleLinear1dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleLinear1dVec() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleLinear1dVec")
 
 return retVal, err
 }
@@ -41132,9 +43519,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest1d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest1d")
 
 return retVal, err
 }
@@ -41155,9 +43543,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest1dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest1dBackward")
 
 return retVal, err
 }
@@ -41178,9 +43567,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleNearest1dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest1dBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest1dBackwardGradInput")
 
 return retVal, err
 }
@@ -41201,9 +43591,10 @@ var cscalesVal float64 = 0.0
 }
 lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesVal, cscalesNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest1dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest1dOut")
 
 return retVal, err
 }
@@ -41218,9 +43609,10 @@ func UpsampleNearest1dVec(input *Tensor, outputSize []int64, scaleFactors []floa
 scaleFactorsLen := len(scaleFactors)
 lib.AtgUpsampleNearest1dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest1dVec() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest1dVec")
 
 return retVal, err
 }
@@ -41247,9 +43639,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest2d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest2d")
 
 return retVal, err
 }
@@ -41276,9 +43669,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest2dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest2dBackward")
 
 return retVal, err
 }
@@ -41305,9 +43699,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest2dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest2dBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest2dBackwardGradInput")
 
 return retVal, err
 }
@@ -41334,9 +43729,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest2dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest2dOut")
 
 return retVal, err
 }
@@ -41351,9 +43747,10 @@ func UpsampleNearest2dVec(input *Tensor, outputSize []int64, scaleFactors []floa
 scaleFactorsLen := len(scaleFactors)
 lib.AtgUpsampleNearest2dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest2dVec() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest2dVec")
 
 return retVal, err
 }
@@ -41386,9 +43783,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest3d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest3d")
 
 return retVal, err
 }
@@ -41421,9 +43819,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest3dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest3dBackward")
 
 return retVal, err
 }
@@ -41456,9 +43855,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest3dBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest3dBackwardGradInput")
 
 return retVal, err
 }
@@ -41491,9 +43891,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest3dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest3dOut")
 
 return retVal, err
 }
@@ -41508,9 +43909,10 @@ func UpsampleNearest3dVec(input *Tensor, outputSize []int64, scaleFactors []floa
 scaleFactorsLen := len(scaleFactors)
 lib.AtgUpsampleNearest3dVec(ptr, input.ctensor, outputSize, outputSizeLen, scaleFactors, scaleFactorsLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleNearest3dVec() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleNearest3dVec")
 
 return retVal, err
 }
@@ -41545,9 +43947,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleTrilinear3d() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleTrilinear3d")
 
 return retVal, err
 }
@@ -41582,9 +43985,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleTrilinear3dBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleTrilinear3dBackward")
 
 return retVal, err
 }
@@ -41619,9 +44023,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleTrilinear3dBackwardGradInput(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, outputSizeLen, inputSize, inputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleTrilinear3dBackwardGradInput() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleTrilinear3dBackwardGradInput")
 
 return retVal, err
 }
@@ -41656,9 +44061,10 @@ var cscalesWVal float64 = 0.0
 }
 lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, outputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleTrilinear3dOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleTrilinear3dOut")
 
 return retVal, err
 }
@@ -41675,9 +44081,10 @@ calignCorners := int32(0)
 scaleFactorsLen := len(scaleFactors)
 lib.AtgUpsampleTrilinear3dVec(ptr, input.ctensor, outputSize, outputSizeLen, calignCorners, scaleFactors, scaleFactorsLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("UpsampleTrilinear3dVec() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "UpsampleTrilinear3dVec")
 
 return retVal, err
 }
@@ -41693,9 +44100,10 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgValueSelectingReductionBackward(ptr, grad.ctensor, dim, indices.ctensor, sizes, sizesLen, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ValueSelectingReductionBackward() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ValueSelectingReductionBackward")
 
 return retVal, err
 }
@@ -41709,9 +44117,10 @@ func(ts *Tensor) Values(del bool)(retVal *Tensor, err error) {
 
 lib.AtgValues(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Values() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Values")
 
 return retVal, err
 }
@@ -41725,9 +44134,10 @@ func(ts *Tensor) ValuesCopy(del bool)(retVal *Tensor, err error) {
 
 lib.AtgValuesCopy(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ValuesCopy() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ValuesCopy")
 
 return retVal, err
 }
@@ -41741,9 +44151,10 @@ func(ts *Tensor) ValuesCopyOut(out *Tensor, del bool)(retVal *Tensor, err error)
 
 lib.AtgValuesCopyOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ValuesCopyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ValuesCopyOut")
 
 return retVal, err
 }
@@ -41764,9 +44175,10 @@ cincreasing := int32(0)
 if increasing { cincreasing = int32(1) }
 lib.AtgVander(ptr, x.ctensor, cnVal, cnNull, cincreasing)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Vander() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Vander")
 
 return retVal, err
 }
@@ -41782,9 +44194,10 @@ func(ts *Tensor) Var(unbiased bool, del bool)(retVal *Tensor, err error) {
 if unbiased { cunbiased = int32(1) }
 lib.AtgVar(ptr, ts.ctensor, cunbiased)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Var() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Var")
 
 return retVal, err
 }
@@ -41807,9 +44220,10 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgVarCorrection(ptr, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarCorrection() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "VarCorrection")
 
 return retVal, err
 }
@@ -41832,9 +44246,10 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgVarCorrectionOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarCorrectionOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "VarCorrectionOut")
 
 return retVal, err
 }
@@ -41853,9 +44268,10 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgVarDim(ptr, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarDim() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "VarDim")
 
 return retVal, err
 }
@@ -41871,10 +44287,11 @@ func(ts *Tensor) VarMean(unbiased bool, del bool)(retVal0 *Tensor, retVal1 *Tens
 if unbiased { cunbiased = int32(1) }
 lib.AtgVarMean(ctensorPtr0, ts.ctensor, cunbiased)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarMean() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "VarMean_0")
+ retVal1 = newTensor(*ctensorPtr1, "VarMean_1")
 
 return retVal0, retVal1, err
 }
@@ -41897,10 +44314,11 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgVarMeanCorrection(ctensorPtr0, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarMeanCorrection() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "VarMeanCorrection_0")
+ retVal1 = newTensor(*ctensorPtr1, "VarMeanCorrection_1")
 
 return retVal0, retVal1, err
 }
@@ -41923,10 +44341,11 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgVarMeanCorrectionOut(ctensorPtr0, out0.ctensor, out1.ctensor, ts.ctensor, dim, dimLen, ccorrectionVal, ccorrectionNull, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarMeanCorrectionOut() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "VarMeanCorrectionOut_0")
+ retVal1 = newTensor(*ctensorPtr1, "VarMeanCorrectionOut_1")
 
 return retVal0, retVal1, err
 }
@@ -41945,10 +44364,11 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgVarMeanDim(ctensorPtr0, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarMeanDim() failed: %w", err)
 return retVal0, retVal1, err
 }
- retVal0 = &Tensor{ctensor: *ctensorPtr0}
- retVal1 = &Tensor{ctensor: *ctensorPtr1}
+ retVal0 = newTensor(*ctensorPtr0, "VarMeanDim_0")
+ retVal1 = newTensor(*ctensorPtr1, "VarMeanDim_1")
 
 return retVal0, retVal1, err
 }
@@ -41967,9 +44387,10 @@ ckeepdim := int32(0)
 if keepdim { ckeepdim = int32(1) }
 lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, dimLen, cunbiased, ckeepdim)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VarOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "VarOut")
 
 return retVal, err
 }
@@ -41983,9 +44404,10 @@ func(ts *Tensor) Vdot(other *Tensor, del bool)(retVal *Tensor, err error) {
 
 lib.AtgVdot(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Vdot() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Vdot")
 
 return retVal, err
 }
@@ -41999,9 +44421,10 @@ func(ts *Tensor) VdotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor, e
 
 lib.AtgVdotOut(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VdotOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "VdotOut")
 
 return retVal, err
 }
@@ -42016,9 +44439,10 @@ func(ts *Tensor) View(size []int64, del bool)(retVal *Tensor, err error) {
 sizeLen := len(size)
 lib.AtgView(ptr, ts.ctensor, size, sizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("View() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "View")
 
 return retVal, err
 }
@@ -42032,9 +44456,10 @@ func(ts *Tensor) ViewAs(other *Tensor, del bool)(retVal *Tensor, err error) {
 
 lib.AtgViewAs(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewAs() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewAs")
 
 return retVal, err
 }
@@ -42048,9 +44473,10 @@ func(ts *Tensor) ViewAsComplex(del bool)(retVal *Tensor, err error) {
 
 lib.AtgViewAsComplex(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewAsComplex() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewAsComplex")
 
 return retVal, err
 }
@@ -42064,9 +44490,10 @@ func(ts *Tensor) ViewAsComplexCopy(del bool)(retVal *Tensor, err error) {
 
 lib.AtgViewAsComplexCopy(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewAsComplexCopy() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewAsComplexCopy")
 
 return retVal, err
 }
@@ -42080,9 +44507,10 @@ func(ts *Tensor) ViewAsComplexCopyOut(out *Tensor, del bool)(retVal *Tensor, err
 
 lib.AtgViewAsComplexCopyOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewAsComplexCopyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewAsComplexCopyOut")
 
 return retVal, err
 }
@@ -42096,9 +44524,10 @@ func(ts *Tensor) ViewAsReal(del bool)(retVal *Tensor, err error) {
 
 lib.AtgViewAsReal(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewAsReal() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewAsReal")
 
 return retVal, err
 }
@@ -42112,9 +44541,10 @@ func(ts *Tensor) ViewAsRealCopy(del bool)(retVal *Tensor, err error) {
 
 lib.AtgViewAsRealCopy(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewAsRealCopy() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewAsRealCopy")
 
 return retVal, err
 }
@@ -42128,9 +44558,10 @@ func(ts *Tensor) ViewAsRealCopyOut(out *Tensor, del bool)(retVal *Tensor, err er
 
 lib.AtgViewAsRealCopyOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewAsRealCopyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewAsRealCopyOut")
 
 return retVal, err
 }
@@ -42145,9 +44576,10 @@ func(ts *Tensor) ViewCopy(size []int64, del bool)(retVal *Tensor, err error) {
 sizeLen := len(size)
 lib.AtgViewCopy(ptr, ts.ctensor, size, sizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewCopy() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewCopy")
 
 return retVal, err
 }
@@ -42161,9 +44593,10 @@ func(ts *Tensor) ViewCopyDtype(dtype gotch.DType, del bool)(retVal *Tensor, err
 
 lib.AtgViewCopyDtype(ptr, ts.ctensor, dtype.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewCopyDtype() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewCopyDtype")
 
 return retVal, err
 }
@@ -42177,9 +44610,10 @@ func(ts *Tensor) ViewCopyDtypeOut(out *Tensor, dtype gotch.DType, del bool)(retV
 
 lib.AtgViewCopyDtypeOut(ptr, out.ctensor, ts.ctensor, dtype.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewCopyDtypeOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewCopyDtypeOut")
 
 return retVal, err
 }
@@ -42194,9 +44628,10 @@ func(ts *Tensor) ViewCopyOut(out *Tensor, size []int64, del bool)(retVal *Tensor
 sizeLen := len(size)
 lib.AtgViewCopyOut(ptr, out.ctensor, ts.ctensor, size, sizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewCopyOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewCopyOut")
 
 return retVal, err
 }
@@ -42210,9 +44645,10 @@ func(ts *Tensor) ViewDtype(dtype gotch.DType, del bool)(retVal *Tensor, err erro
 
 lib.AtgViewDtype(ptr, ts.ctensor, dtype.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ViewDtype() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ViewDtype")
 
 return retVal, err
 }
@@ -42227,9 +44663,10 @@ func Vstack(tensors []*Tensor)(retVal *Tensor, err error) {
 for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
 lib.AtgVstack(ptr, ctensors, len(ctensors))
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Vstack() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Vstack")
 
 return retVal, err
 }
@@ -42244,9 +44681,10 @@ func VstackOut(out *Tensor, tensors []*Tensor)(retVal *Tensor, err error) {
 for _, t := range tensors {ctensors = append(ctensors, t.ctensor)}
 lib.AtgVstackOut(ptr, out.ctensor, ctensors, len(ctensors))
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("VstackOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "VstackOut")
 
 return retVal, err
 }
@@ -42259,9 +44697,10 @@ func WhereScalar(condition *Tensor, selfScalar *Scalar, other *Scalar)(retVal *T
 
 lib.AtgWhereScalar(ptr, condition.ctensor, selfScalar.cscalar, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("WhereScalar() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "WhereScalar")
 
 return retVal, err
 }
@@ -42275,9 +44714,10 @@ func(ts *Tensor) WhereScalarother(condition *Tensor, other *Scalar, del bool)(re
 
 lib.AtgWhereScalarother(ptr, condition.ctensor, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("WhereScalarother() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "WhereScalarother")
 
 return retVal, err
 }
@@ -42290,9 +44730,10 @@ func WhereScalarself(condition *Tensor, selfScalar *Scalar, other *Tensor)(retVa
 
 lib.AtgWhereScalarself(ptr, condition.ctensor, selfScalar.cscalar, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("WhereScalarself() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "WhereScalarself")
 
 return retVal, err
 }
@@ -42306,9 +44747,10 @@ func(ts *Tensor) WhereSelf(condition *Tensor, other *Tensor, del bool)(retVal *T
 
 lib.AtgWhereSelf(ptr, condition.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("WhereSelf() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "WhereSelf")
 
 return retVal, err
 }
@@ -42322,9 +44764,10 @@ func(ts *Tensor) WhereSelfOut(out *Tensor, condition *Tensor, other *Tensor, del
 
 lib.AtgWhereSelfOut(ptr, out.ctensor, condition.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("WhereSelfOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "WhereSelfOut")
 
 return retVal, err
 }
@@ -42338,9 +44781,10 @@ func(ts *Tensor) Xlogy(other *Tensor, del bool)(retVal *Tensor, err error) {
 
 lib.AtgXlogy(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Xlogy() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Xlogy")
 
 return retVal, err
 }
@@ -42353,6 +44797,7 @@ func(ts *Tensor) Xlogy_(other *Tensor)(err error) {
 
 lib.AtgXlogy_(ptr, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Xlogy_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -42369,9 +44814,10 @@ func(ts *Tensor) XlogyOutscalarOther(out *Tensor, other *Scalar, del bool)(retVa
 
 lib.AtgXlogyOutscalarOther(ptr, out.ctensor, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("XlogyOutscalarOther() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "XlogyOutscalarOther")
 
 return retVal, err
 }
@@ -42384,9 +44830,10 @@ func XlogyOutscalarSelf(out *Tensor, selfScalar *Scalar, other *Tensor)(retVal *
 
 lib.AtgXlogyOutscalarSelf(ptr, out.ctensor, selfScalar.cscalar, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("XlogyOutscalarSelf() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "XlogyOutscalarSelf")
 
 return retVal, err
 }
@@ -42400,9 +44847,10 @@ func(ts *Tensor) XlogyOuttensor(out *Tensor, other *Tensor, del bool)(retVal *Te
 
 lib.AtgXlogyOuttensor(ptr, out.ctensor, ts.ctensor, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("XlogyOuttensor() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "XlogyOuttensor")
 
 return retVal, err
 }
@@ -42416,9 +44864,10 @@ func(ts *Tensor) XlogyScalarOther(other *Scalar, del bool)(retVal *Tensor, err e
 
 lib.AtgXlogyScalarOther(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("XlogyScalarOther() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "XlogyScalarOther")
 
 return retVal, err
 }
@@ -42431,6 +44880,7 @@ func(ts *Tensor) XlogyScalarOther_(other *Scalar)(err error) {
 
 lib.AtgXlogyScalarOther_(ptr, ts.ctensor, other.cscalar)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("XlogyScalarOther_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -42446,9 +44896,10 @@ func XlogyScalarSelf(selfScalar *Scalar, other *Tensor)(retVal *Tensor, err erro
 
 lib.AtgXlogyScalarSelf(ptr, selfScalar.cscalar, other.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("XlogyScalarSelf() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "XlogyScalarSelf")
 
 return retVal, err
 }
@@ -42462,9 +44913,10 @@ func(ts *Tensor) Zero(del bool)(retVal *Tensor, err error) {
 
 lib.AtgZero(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Zero() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Zero")
 
 return retVal, err
 }
@@ -42477,6 +44929,7 @@ func(ts *Tensor) Zero_()(err error) {
 
 lib.AtgZero_(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Zero_() failed: %w", err)
 return err
 }
 ts.ctensor = *ptr
@@ -42493,9 +44946,10 @@ func(ts *Tensor) ZeroOut(out *Tensor, del bool)(retVal *Tensor, err error) {
 
 lib.AtgZeroOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ZeroOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ZeroOut")
 
 return retVal, err
 }
@@ -42509,9 +44963,10 @@ func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(re
 sizeLen := len(size)
 lib.AtgZeros(ptr, size, sizeLen, optionsKind.CInt(), optionsDevice.CInt())
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("Zeros() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "Zeros")
 
 return retVal, err
 }
@@ -42525,9 +44980,10 @@ func(ts *Tensor) ZerosLike(del bool)(retVal *Tensor, err error) {
 
 lib.AtgZerosLike(ptr, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ZerosLike() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ZerosLike")
 
 return retVal, err
 }
@@ -42541,9 +44997,10 @@ func(ts *Tensor) ZerosLikeOut(out *Tensor, del bool)(retVal *Tensor, err error)
 
 lib.AtgZerosLikeOut(ptr, out.ctensor, ts.ctensor)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ZerosLikeOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ZerosLikeOut")
 
 return retVal, err
 }
@@ -42557,9 +45014,10 @@ func ZerosOut(out *Tensor, size []int64)(retVal *Tensor, err error) {
 sizeLen := len(size)
 lib.AtgZerosOut(ptr, out.ctensor, size, sizeLen)
 if err = TorchErr(); err != nil {
+ err = fmt.Errorf("ZerosOut() failed: %w", err)
 return retVal, err
 }
- retVal = &Tensor{ctensor: *ptr}
+ retVal = newTensor(*ptr, "ZerosOut")
 
 return retVal, err
 }
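
Note on the generated pattern above: each fallible binding now wraps the raw TorchErr() result with the Go-side function name (fmt.Errorf with %w keeps the original libtorch error reachable through errors.Unwrap/errors.Is), and each returned C tensor is routed through newTensor(ctensor, name) instead of a bare &Tensor{ctensor: *ptr} literal so the wrapper can be tracked and released by the garbage collector. newTensor itself is defined outside this diff; the following is only a minimal sketch of the contract the generated callers rely on, assuming a finalizer-based cleanup. The name field on Tensor, the variadic nameOpt parameter, and the freeCTensor helper are illustrative assumptions, not the actual gotch implementation.

// Sketch only, not part of this patch. Assumes: import "runtime";
// lib "github.com/sugarme/gotch/libtch"; and a Tensor struct that
// carries a name field for leak diagnostics.
func newTensor(ctensor lib.Ctensor, nameOpt ...string) *Tensor {
	name := "tensor"
	if len(nameOpt) > 0 {
		name = nameOpt[0] // e.g. "UpsampleLinear1d", "VarMean_0"
	}

	x := &Tensor{ctensor: ctensor, name: name}

	// Free the underlying libtorch tensor once the Go wrapper becomes
	// unreachable; tagging each wrapper with the name of the creating
	// function is what makes multi-result names like "VarMean_0" and
	// "VarMean_1" useful in leak reports.
	runtime.SetFinalizer(x, func(t *Tensor) {
		freeCTensor(t) // assumed helper wrapping the libtch free routine
	})

	return x
}

With the wrapping in place, a failure reports which generated method raised it while the original message stays unwrappable, for example:

// err prints as: View() failed: <original libtorch message>
if _, err := ts.View([]int64{-1, 10}, false); err != nil {
	log.Fatal(err)
}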